claude-mpm 4.3.20__py3-none-any.whl → 4.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/agent_loader.py +2 -2
  3. claude_mpm/agents/agent_loader_integration.py +2 -2
  4. claude_mpm/agents/async_agent_loader.py +2 -2
  5. claude_mpm/agents/base_agent_loader.py +2 -2
  6. claude_mpm/agents/frontmatter_validator.py +2 -2
  7. claude_mpm/agents/system_agent_config.py +2 -2
  8. claude_mpm/agents/templates/data_engineer.json +1 -2
  9. claude_mpm/cli/commands/doctor.py +2 -2
  10. claude_mpm/cli/commands/mpm_init.py +560 -47
  11. claude_mpm/cli/commands/mpm_init_handler.py +6 -0
  12. claude_mpm/cli/parsers/mpm_init_parser.py +39 -1
  13. claude_mpm/cli/startup_logging.py +11 -9
  14. claude_mpm/commands/mpm-init.md +76 -12
  15. claude_mpm/config/agent_config.py +2 -2
  16. claude_mpm/config/paths.py +2 -2
  17. claude_mpm/core/agent_name_normalizer.py +2 -2
  18. claude_mpm/core/config.py +2 -1
  19. claude_mpm/core/config_aliases.py +2 -2
  20. claude_mpm/core/file_utils.py +1 -0
  21. claude_mpm/core/log_manager.py +2 -2
  22. claude_mpm/core/tool_access_control.py +2 -2
  23. claude_mpm/core/unified_agent_registry.py +2 -2
  24. claude_mpm/core/unified_paths.py +2 -2
  25. claude_mpm/experimental/cli_enhancements.py +3 -2
  26. claude_mpm/hooks/base_hook.py +2 -2
  27. claude_mpm/hooks/instruction_reinforcement.py +2 -2
  28. claude_mpm/hooks/memory_integration_hook.py +1 -1
  29. claude_mpm/hooks/validation_hooks.py +2 -2
  30. claude_mpm/scripts/mpm_doctor.py +2 -2
  31. claude_mpm/services/agents/loading/agent_profile_loader.py +2 -2
  32. claude_mpm/services/agents/loading/base_agent_manager.py +2 -2
  33. claude_mpm/services/agents/loading/framework_agent_loader.py +2 -2
  34. claude_mpm/services/agents/management/agent_capabilities_generator.py +2 -2
  35. claude_mpm/services/agents/management/agent_management_service.py +2 -2
  36. claude_mpm/services/agents/memory/content_manager.py +5 -2
  37. claude_mpm/services/agents/memory/memory_categorization_service.py +5 -2
  38. claude_mpm/services/agents/memory/memory_file_service.py +28 -6
  39. claude_mpm/services/agents/memory/memory_format_service.py +5 -2
  40. claude_mpm/services/agents/memory/memory_limits_service.py +4 -2
  41. claude_mpm/services/agents/registry/deployed_agent_discovery.py +2 -2
  42. claude_mpm/services/agents/registry/modification_tracker.py +4 -4
  43. claude_mpm/services/async_session_logger.py +2 -1
  44. claude_mpm/services/claude_session_logger.py +2 -2
  45. claude_mpm/services/core/path_resolver.py +3 -2
  46. claude_mpm/services/diagnostics/diagnostic_runner.py +4 -3
  47. claude_mpm/services/event_bus/direct_relay.py +2 -1
  48. claude_mpm/services/event_bus/event_bus.py +2 -1
  49. claude_mpm/services/event_bus/relay.py +2 -2
  50. claude_mpm/services/framework_claude_md_generator/content_assembler.py +2 -2
  51. claude_mpm/services/infrastructure/daemon_manager.py +2 -2
  52. claude_mpm/services/memory/cache/simple_cache.py +2 -2
  53. claude_mpm/services/project/archive_manager.py +981 -0
  54. claude_mpm/services/project/documentation_manager.py +536 -0
  55. claude_mpm/services/project/enhanced_analyzer.py +491 -0
  56. claude_mpm/services/project/project_organizer.py +904 -0
  57. claude_mpm/services/response_tracker.py +2 -2
  58. claude_mpm/services/socketio/handlers/connection.py +14 -33
  59. claude_mpm/services/socketio/server/eventbus_integration.py +2 -2
  60. claude_mpm/services/unified/__init__.py +65 -0
  61. claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
  62. claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
  63. claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
  64. claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
  65. claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
  66. claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
  67. claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
  68. claude_mpm/services/unified/deployment_strategies/base.py +557 -0
  69. claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
  70. claude_mpm/services/unified/deployment_strategies/local.py +594 -0
  71. claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
  72. claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
  73. claude_mpm/services/unified/interfaces.py +499 -0
  74. claude_mpm/services/unified/migration.py +532 -0
  75. claude_mpm/services/unified/strategies.py +551 -0
  76. claude_mpm/services/unified/unified_analyzer.py +534 -0
  77. claude_mpm/services/unified/unified_config.py +688 -0
  78. claude_mpm/services/unified/unified_deployment.py +470 -0
  79. claude_mpm/services/version_control/version_parser.py +5 -4
  80. claude_mpm/storage/state_storage.py +2 -2
  81. claude_mpm/utils/agent_dependency_loader.py +49 -0
  82. claude_mpm/utils/common.py +542 -0
  83. claude_mpm/utils/database_connector.py +298 -0
  84. claude_mpm/utils/error_handler.py +2 -1
  85. claude_mpm/utils/log_cleanup.py +2 -2
  86. claude_mpm/utils/path_operations.py +2 -2
  87. claude_mpm/utils/robust_installer.py +56 -0
  88. claude_mpm/utils/session_logging.py +2 -2
  89. claude_mpm/utils/subprocess_utils.py +2 -2
  90. claude_mpm/validation/agent_validator.py +2 -2
  91. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
  92. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +96 -71
  93. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
  94. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
  95. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
  96. {claude_mpm-4.3.20.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,804 @@
1
+ """
2
+ Performance Analyzer Strategy Implementation
3
+ ============================================
4
+
5
+ Analyzes code for performance bottlenecks and optimization opportunities.
6
+ Consolidates performance analysis functionality from multiple services.
7
+
8
+ Author: Claude MPM Development Team
9
+ Created: 2025-01-26
10
+ """
11
+
12
+ import ast
13
+ import re
14
+ from pathlib import Path
15
+ from typing import Any, Dict, List, Optional, Set, Tuple
16
+
17
+ from claude_mpm.core.logging_utils import get_logger
18
+
19
+ from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
20
+
21
+ logger = get_logger(__name__)
22
+
23
+
24
class PerformanceAnalyzerStrategy(AnalyzerStrategy):
    """
    Strategy for analyzing performance bottlenecks and optimization opportunities.

    Consolidates:
    - Algorithm complexity analysis
    - Database query optimization
    - Memory usage patterns
    - I/O operation efficiency
    - Caching opportunities
    """

    # Performance anti-patterns
    # Regex heuristics scanned by _scan_performance_patterns() with
    # re.IGNORECASE | re.MULTILINE. Each entry's "severity", "description"
    # and "category" values are copied verbatim into reported issues.
    PERFORMANCE_PATTERNS: Dict[str, Dict[str, Any]] = {
        "n_plus_one_query": {
            "patterns": [
                # Python-style loop immediately followed by an ORM-like call.
                r'for .* in .*:\s*\n.*\.(get|filter|select|find)',
                # JS arrow callback that fetches inside a .map().
                r'\.map\s*\([^)]*=>\s*[^)]*fetch',
            ],
            "severity": "high",
            "description": "Potential N+1 query problem",
            "category": "database",
        },
        "unnecessary_loop": {
            "patterns": [
                r'for .* in .*:\s*\n\s*for .* in .*:\s*\n\s*for .* in .*:',
                r'\.forEach\s*\([^)]*\)\s*{\s*[^}]*\.forEach',
            ],
            "severity": "medium",
            "description": "Triple nested loop detected",
            "category": "algorithm",
        },
        "string_concatenation_loop": {
            "patterns": [
                r'for .* in .*:\s*\n.*\+=\s*["\']',
                r'\.forEach\s*\([^)]*\)\s*{\s*[^}]*\+=',
            ],
            "severity": "medium",
            "description": "String concatenation in loop",
            "category": "memory",
        },
        "synchronous_io": {
            "patterns": [
                r'open\s*\([^)]*\)\.read\s*\(',
                r'fs\.readFileSync\s*\(',
                r'requests\.get\s*\([^)]*\)\.text',
            ],
            "severity": "medium",
            "description": "Synchronous I/O operation",
            "category": "io",
        },
        "missing_index": {
            "patterns": [
                r'SELECT .* FROM .* WHERE .* LIKE .*%',
                r'\.find\s*\(\s*{\s*[^}]*:\s*{\s*\$regex',
            ],
            "severity": "high",
            "description": "Potentially unindexed database query",
            "category": "database",
        },
    }

    # Algorithm complexity indicators
    # NOTE(review): not referenced by any method visible in this module —
    # confirm whether external callers rely on it before removing.
    COMPLEXITY_INDICATORS: Dict[str, List[str]] = {
        "quadratic": ["nested_loops", "bubble_sort", "selection_sort"],
        "exponential": ["recursive_fibonacci", "recursive_factorial"],
        "linear": ["single_loop", "map", "filter"],
        "logarithmic": ["binary_search", "divide_conquer"],
        "constant": ["direct_access", "hash_lookup"],
    }

    # Memory usage patterns
    # Scanned by _scan_memory_patterns(); unlike PERFORMANCE_PATTERNS these
    # entries carry no severity/category — the scanner hard-codes
    # severity="medium" and category="memory".
    MEMORY_PATTERNS: Dict[str, Dict[str, Any]] = {
        "memory_leak": {
            "patterns": [
                r'global\s+\w+\s*=',
                r'window\.\w+\s*=',
                r'self\.\w+\s*=\s*\[\]',
            ],
            "description": "Potential memory leak from global variable",
        },
        "large_data_structure": {
            "patterns": [
                # Literal allocations of >= 100,000 elements.
                r'\[\s*\*\s*range\s*\(\s*\d{6,}',
                r'Array\s*\(\s*\d{6,}\s*\)',
            ],
            "description": "Large data structure allocation",
        },
        "inefficient_copy": {
            "patterns": [
                r'deepcopy\s*\(',
                r'JSON\.parse\s*\(\s*JSON\.stringify',
                r'\.slice\s*\(\s*\)\.map',
            ],
            "description": "Inefficient data copying",
        },
    }
121
+
122
def __init__(self):
    """Register strategy metadata with the base class and create the cache.

    The metadata advertises which target types and operations this
    analyzer supports; can_handle() checks incoming contexts against it.
    """
    super().__init__(
        StrategyMetadata(
            name="PerformanceAnalyzer",
            description="Analyzes performance bottlenecks and optimization opportunities",
            supported_types=["file", "directory", "function", "query"],
            supported_operations=["analyze", "profile", "optimize", "benchmark"],
            priority=StrategyPriority.HIGH,
            tags={"performance", "optimization", "bottlenecks", "profiling"},
        )
    )
    # Per-instance cache; not read anywhere in this module's visible code.
    self._optimization_cache = {}
135
+
136
def can_handle(self, context: StrategyContext) -> bool:
    """Return True when *context* names a supported target type AND operation."""
    if context.target_type not in self.metadata.supported_types:
        return False
    return context.operation in self.metadata.supported_operations
142
+
143
def validate_input(self, input_data: Any) -> List[str]:
    """Validate *input_data* before analysis.

    Returns a list of human-readable error strings; an empty list means
    the input is acceptable. Accepted inputs: a truthy str/Path pointing
    at an existing filesystem entry, a dict, or an ast.AST node.
    """
    if not input_data:
        # Covers None, "", empty dict, etc.
        return ["Input data is required"]

    if isinstance(input_data, (str, Path)):
        candidate = Path(input_data)
        if candidate.exists():
            return []
        return [f"Path does not exist: {candidate}"]

    if isinstance(input_data, (dict, ast.AST)):
        return []

    return [f"Invalid input type: {type(input_data).__name__}"]
159
+
160
def analyze(
    self, target: Any, options: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Execute performance analysis on target.

    Args:
        target: File path, directory path (str or Path), or an ast.AST
            node to analyze.
        options: Analysis options forwarded to the specific analyzer
            (profile_depth, check_queries, etc.). Defaults to {}.

    Returns:
        Analysis results dict. Shape depends on the target kind (see
        _analyze_file / _analyze_directory / _analyze_ast_performance);
        on failure: {"status": "error", "message": ...}.
    """
    options = options or {}

    if isinstance(target, (str, Path)):
        target_path = Path(target)
        if target_path.is_file():
            return self._analyze_file(target_path, options)
        if target_path.is_dir():
            return self._analyze_directory(target_path, options)
        # Bug fix: a nonexistent path previously fell through to the
        # generic "Unsupported target type" message below, misreporting
        # a missing file as a type error.
        return {
            "status": "error",
            "message": f"Path does not exist: {target_path}",
        }

    if isinstance(target, ast.AST):
        return self._analyze_ast_performance(target, options)

    return {
        "status": "error",
        "message": f"Unsupported target type: {type(target).__name__}",
    }
189
+
190
def _analyze_file(self, file_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
    """Analyze a single file for performance issues.

    Runs the generic regex scans on every file, then language-specific
    analysis for Python (.py) and JavaScript/TypeScript (.js/.jsx/.ts/.tsx),
    scores the result, and fills in optimizations if none were produced.
    Any exception (unreadable file, bad encoding, ...) is caught and turned
    into a status="error" result rather than propagating.

    Note: *options* is currently not consulted by this method.
    """
    results = {
        "status": "success",
        "type": "file",
        "path": str(file_path),
        "issues": [],
        "optimizations": [],
        "complexity": {},          # populated only for .py files
        "performance_score": 100,  # recomputed below from the issues found
    }

    try:
        # May raise (missing file, permissions, UnicodeDecodeError) — all
        # handled by the broad except below.
        content = file_path.read_text(encoding="utf-8")

        # Scan for performance patterns
        issues = self._scan_performance_patterns(content, file_path)
        results["issues"].extend(issues)

        # Check memory usage patterns
        memory_issues = self._scan_memory_patterns(content, file_path)
        results["issues"].extend(memory_issues)

        # Language-specific analysis
        if file_path.suffix == ".py":
            python_analysis = self._analyze_python_performance(content, file_path)
            results["complexity"] = python_analysis.get("complexity", {})
            results["issues"].extend(python_analysis.get("issues", []))
            results["optimizations"].extend(python_analysis.get("optimizations", []))

        elif file_path.suffix in [".js", ".jsx", ".ts", ".tsx"]:
            js_analysis = self._analyze_javascript_performance(content, file_path)
            results["issues"].extend(js_analysis.get("issues", []))
            results["optimizations"].extend(js_analysis.get("optimizations", []))

        # Calculate performance score (must run after all issues collected)
        results["performance_score"] = self._calculate_performance_score(results)

        # Generate optimization recommendations only when the language
        # analyzers produced none of their own.
        if not results["optimizations"]:
            results["optimizations"] = self._generate_optimizations(results["issues"])

    except Exception as e:
        # Deliberate best-effort: one unreadable file must not abort a
        # directory-wide scan; the error is recorded in the result.
        logger.error(f"Error analyzing file {file_path}: {e}")
        results["status"] = "error"
        results["error"] = str(e)

    return results
238
+
239
+ def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
240
+ """Analyze all files in a directory for performance issues."""
241
+ results = {
242
+ "status": "success",
243
+ "type": "directory",
244
+ "path": str(dir_path),
245
+ "files_analyzed": 0,
246
+ "total_issues": 0,
247
+ "issues_by_category": {},
248
+ "files": [],
249
+ "performance_score": 100,
250
+ }
251
+
252
+ # Define file extensions to analyze
253
+ analyzable_extensions = {
254
+ ".py", ".js", ".jsx", ".ts", ".tsx", ".java", ".cs",
255
+ ".go", ".rs", ".cpp", ".c", ".sql",
256
+ }
257
+
258
+ files_with_issues = []
259
+ total_score = 0
260
+
261
+ # Analyze each file
262
+ for file_path in dir_path.rglob("*"):
263
+ if file_path.is_file() and file_path.suffix in analyzable_extensions:
264
+ # Skip common ignore patterns
265
+ if any(part.startswith(".") for part in file_path.parts):
266
+ continue
267
+ if any(ignore in file_path.parts for ignore in ["node_modules", "__pycache__", "dist", "build"]):
268
+ continue
269
+
270
+ file_result = self._analyze_file(file_path, options)
271
+ if file_result["status"] == "success":
272
+ results["files_analyzed"] += 1
273
+ total_score += file_result["performance_score"]
274
+
275
+ if file_result["issues"]:
276
+ results["files"].append(file_result)
277
+ results["total_issues"] += len(file_result["issues"])
278
+
279
+ # Categorize issues
280
+ for issue in file_result["issues"]:
281
+ category = issue.get("category", "unknown")
282
+ results["issues_by_category"][category] = \
283
+ results["issues_by_category"].get(category, 0) + 1
284
+
285
+ # Calculate average performance score
286
+ if results["files_analyzed"] > 0:
287
+ results["performance_score"] = total_score / results["files_analyzed"]
288
+
289
+ # Add summary and recommendations
290
+ results["summary"] = self._generate_performance_summary(results)
291
+ results["recommendations"] = self._generate_directory_recommendations(results)
292
+
293
+ return results
294
+
295
+ def _scan_performance_patterns(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
296
+ """Scan for performance anti-patterns."""
297
+ issues = []
298
+
299
+ for pattern_name, pattern_info in self.PERFORMANCE_PATTERNS.items():
300
+ for pattern in pattern_info["patterns"]:
301
+ matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
302
+ for match in matches:
303
+ line_num = content[:match.start()].count("\n") + 1
304
+
305
+ issues.append({
306
+ "type": pattern_name,
307
+ "severity": pattern_info["severity"],
308
+ "category": pattern_info["category"],
309
+ "description": pattern_info["description"],
310
+ "file": str(file_path),
311
+ "line": line_num,
312
+ "code": match.group(0)[:100],
313
+ })
314
+
315
+ return issues
316
+
317
+ def _scan_memory_patterns(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
318
+ """Scan for memory usage issues."""
319
+ issues = []
320
+
321
+ for pattern_name, pattern_info in self.MEMORY_PATTERNS.items():
322
+ for pattern in pattern_info["patterns"]:
323
+ matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
324
+ for match in matches:
325
+ line_num = content[:match.start()].count("\n") + 1
326
+
327
+ issues.append({
328
+ "type": f"memory_{pattern_name}",
329
+ "severity": "medium",
330
+ "category": "memory",
331
+ "description": pattern_info["description"],
332
+ "file": str(file_path),
333
+ "line": line_num,
334
+ "code": match.group(0),
335
+ })
336
+
337
+ return issues
338
+
339
def _analyze_python_performance(self, content: str, file_path: Path) -> Dict[str, Any]:
    """Perform Python-specific performance analysis.

    Parses *content* with ast and walks it with a visitor that records
    per-function complexity estimates, flags high-complexity functions and
    deep loop nesting as issues, and suggests optimizations. Unparseable
    content yields the empty result (SyntaxError is swallowed on purpose —
    this is best-effort analysis, not linting).

    Note: only ast.FunctionDef is visited, so async functions are not
    analyzed here.
    """
    results = {
        "complexity": {},
        "issues": [],
        "optimizations": [],
    }

    try:
        tree = ast.parse(content)

        # Analyze function complexities
        # The visitor appends directly into `results` via closure.
        class PerformanceVisitor(ast.NodeVisitor):
            def __init__(self):
                self.current_function = None  # name of enclosing function, if any
                self.loop_depth = 0           # current For/While nesting level
                self.complexities = {}        # function name -> complexity dict

            def visit_FunctionDef(self, node):
                # Save/restore state so nested defs get a fresh loop depth.
                old_function = self.current_function
                self.current_function = node.name
                old_depth = self.loop_depth
                self.loop_depth = 0

                # Calculate complexity
                complexity = self._calculate_complexity(node)
                self.complexities[node.name] = complexity

                # Check for performance issues
                # (these labels are the values _calculate_complexity can emit
                # for nesting >= 2; "O(2^n)" is listed but never produced there)
                if complexity["time_complexity"] in ["O(n^2)", "O(n^3)", "O(2^n)"]:
                    results["issues"].append({
                        "type": "high_complexity",
                        "severity": "high",
                        "category": "algorithm",
                        "description": f"Function '{node.name}' has {complexity['time_complexity']} complexity",
                        "file": str(file_path),
                        "line": node.lineno,
                        "code": node.name,
                    })

                self.generic_visit(node)
                self.current_function = old_function
                self.loop_depth = old_depth

            def visit_For(self, node):
                self.loop_depth += 1
                # Flag every loop nested three or more levels deep.
                if self.loop_depth > 2:
                    results["issues"].append({
                        "type": "deep_nesting",
                        "severity": "medium",
                        "category": "algorithm",
                        "description": f"Deep loop nesting (level {self.loop_depth})",
                        "file": str(file_path),
                        "line": node.lineno,
                        "code": f"Loop depth: {self.loop_depth}",
                    })
                self.generic_visit(node)
                self.loop_depth -= 1

            def visit_While(self, node):
                self.visit_For(node)  # Treat while loops similarly

            def visit_ListComp(self, node):
                # Check for nested list comprehensions
                # -1 excludes `node` itself from its own walk.
                nested_comps = sum(
                    1 for child in ast.walk(node)
                    if isinstance(child, (ast.ListComp, ast.SetComp, ast.DictComp))
                ) - 1

                if nested_comps > 1:
                    results["optimizations"].append({
                        "type": "nested_comprehension",
                        "description": "Consider breaking down nested comprehensions for clarity",
                        "file": str(file_path),
                        "line": node.lineno,
                    })
                # No generic_visit here, so comprehensions nested inside this
                # one are not visited (and not double-reported) separately.

            def _calculate_complexity(self, node):
                """Calculate time and space complexity of a function."""
                loop_count = sum(
                    1 for n in ast.walk(node)
                    if isinstance(n, (ast.For, ast.While))
                )

                # Detect nested loops
                max_nesting = 0
                for child in ast.walk(node):
                    if isinstance(child, (ast.For, ast.While)):
                        nesting = self._get_loop_nesting(child, node)
                        max_nesting = max(max_nesting, nesting)

                # Estimate complexity
                # Heuristic: nesting depth d maps to O(n^d); this is an
                # estimate only and ignores actual iteration bounds.
                if max_nesting >= 3:
                    time_complexity = f"O(n^{max_nesting})"
                elif max_nesting == 2:
                    time_complexity = "O(n^2)"
                elif max_nesting == 1:
                    time_complexity = "O(n)"
                else:
                    time_complexity = "O(1)"

                return {
                    "time_complexity": time_complexity,
                    "loop_count": loop_count,
                    "max_nesting": max_nesting,
                }

            def _get_loop_nesting(self, loop_node, function_node):
                """Get the nesting level of a loop.

                Counts For/While nodes encountered in a walk of
                *function_node* that contain *loop_node*, returning early
                when *loop_node* is reached as a direct child.
                NOTE(review): ast.walk order is breadth-first, so for
                sibling-heavy trees this count is approximate — confirm
                before relying on exact depths.
                """
                nesting = 1
                for parent in ast.walk(function_node):
                    for child in ast.iter_child_nodes(parent):
                        if child is loop_node:
                            return nesting
                        if isinstance(child, (ast.For, ast.While)):
                            if loop_node in ast.walk(child):
                                nesting += 1
                return nesting

        visitor = PerformanceVisitor()
        visitor.visit(tree)
        results["complexity"] = visitor.complexities

        # Check for list operations that could be optimized
        self._check_python_optimizations(tree, results, file_path)

    except SyntaxError:
        # Best-effort: non-Python or broken content simply produces no findings.
        pass

    return results
469
+
470
+ def _check_python_optimizations(self, tree: ast.AST, results: Dict, file_path: Path):
471
+ """Check for Python-specific optimization opportunities."""
472
+ class OptimizationVisitor(ast.NodeVisitor):
473
+ def visit_For(self, node):
474
+ # Check for range(len()) anti-pattern
475
+ if isinstance(node.iter, ast.Call):
476
+ if (isinstance(node.iter.func, ast.Name) and
477
+ node.iter.func.id == "range" and
478
+ len(node.iter.args) == 1):
479
+
480
+ if isinstance(node.iter.args[0], ast.Call):
481
+ if (isinstance(node.iter.args[0].func, ast.Name) and
482
+ node.iter.args[0].func.id == "len"):
483
+
484
+ results["optimizations"].append({
485
+ "type": "range_len_pattern",
486
+ "description": "Use enumerate() instead of range(len())",
487
+ "file": str(file_path),
488
+ "line": node.lineno,
489
+ })
490
+
491
+ self.generic_visit(node)
492
+
493
+ def visit_ListComp(self, node):
494
+ # Check if list comprehension could be a generator
495
+ parent = None
496
+ for p in ast.walk(tree):
497
+ for child in ast.iter_child_nodes(p):
498
+ if child is node:
499
+ parent = p
500
+ break
501
+
502
+ if parent and isinstance(parent, ast.Call):
503
+ if (isinstance(parent.func, ast.Name) and
504
+ parent.func.id in ["sum", "any", "all", "min", "max"]):
505
+
506
+ results["optimizations"].append({
507
+ "type": "generator_opportunity",
508
+ "description": "Consider using generator expression instead of list comprehension",
509
+ "file": str(file_path),
510
+ "line": node.lineno,
511
+ })
512
+
513
+ self.generic_visit(node)
514
+
515
+ visitor = OptimizationVisitor()
516
+ visitor.visit(tree)
517
+
518
+ def _analyze_javascript_performance(self, content: str, file_path: Path) -> Dict[str, Any]:
519
+ """Perform JavaScript-specific performance analysis."""
520
+ results = {
521
+ "issues": [],
522
+ "optimizations": [],
523
+ }
524
+
525
+ # Check for common JS performance issues
526
+ js_patterns = {
527
+ "dom_in_loop": {
528
+ "pattern": r'for\s*\([^)]*\)\s*{\s*[^}]*document\.(getElementById|querySelector)',
529
+ "description": "DOM access inside loop - consider caching",
530
+ "severity": "high",
531
+ },
532
+ "missing_debounce": {
533
+ "pattern": r'addEventListener\s*\(\s*["\']scroll["\']',
534
+ "description": "Scroll event without debouncing",
535
+ "severity": "medium",
536
+ },
537
+ "sync_ajax": {
538
+ "pattern": r'async\s*:\s*false',
539
+ "description": "Synchronous AJAX request",
540
+ "severity": "high",
541
+ },
542
+ "inefficient_array": {
543
+ "pattern": r'\.shift\s*\(\s*\)',
544
+ "description": "Array.shift() is O(n) - consider using different data structure",
545
+ "severity": "medium",
546
+ },
547
+ }
548
+
549
+ for issue_name, issue_info in js_patterns.items():
550
+ matches = re.finditer(issue_info["pattern"], content, re.IGNORECASE | re.MULTILINE)
551
+ for match in matches:
552
+ line_num = content[:match.start()].count("\n") + 1
553
+
554
+ results["issues"].append({
555
+ "type": f"js_{issue_name}",
556
+ "severity": issue_info["severity"],
557
+ "category": "performance",
558
+ "description": issue_info["description"],
559
+ "file": str(file_path),
560
+ "line": line_num,
561
+ "code": match.group(0)[:100],
562
+ })
563
+
564
+ # Check for optimization opportunities
565
+ if "forEach" in content and "return" not in content:
566
+ results["optimizations"].append({
567
+ "type": "use_for_of",
568
+ "description": "Consider using for...of instead of forEach for better performance",
569
+ "file": str(file_path),
570
+ })
571
+
572
+ if ".map(" in content and ".filter(" in content:
573
+ results["optimizations"].append({
574
+ "type": "combine_array_methods",
575
+ "description": "Consider combining map and filter operations for better performance",
576
+ "file": str(file_path),
577
+ })
578
+
579
+ return results
580
+
581
+ def _analyze_ast_performance(self, node: ast.AST, options: Dict[str, Any]) -> Dict[str, Any]:
582
+ """Analyze performance of an AST node."""
583
+ results = {
584
+ "status": "success",
585
+ "type": "ast",
586
+ "complexity": {},
587
+ "issues": [],
588
+ }
589
+
590
+ # Calculate complexity for the node
591
+ if isinstance(node, ast.FunctionDef):
592
+ loop_count = sum(
593
+ 1 for n in ast.walk(node)
594
+ if isinstance(n, (ast.For, ast.While))
595
+ )
596
+
597
+ results["complexity"] = {
598
+ "name": node.name,
599
+ "loop_count": loop_count,
600
+ "line_count": node.end_lineno - node.lineno + 1 if hasattr(node, "end_lineno") else 0,
601
+ }
602
+
603
+ if loop_count > 3:
604
+ results["issues"].append({
605
+ "type": "excessive_loops",
606
+ "severity": "medium",
607
+ "description": f"Function has {loop_count} loops",
608
+ })
609
+
610
+ return results
611
+
612
+ def _calculate_performance_score(self, results: Dict[str, Any]) -> float:
613
+ """Calculate performance score based on issues found."""
614
+ score = 100.0
615
+
616
+ severity_penalties = {
617
+ "critical": 20,
618
+ "high": 10,
619
+ "medium": 5,
620
+ "low": 2,
621
+ }
622
+
623
+ for issue in results.get("issues", []):
624
+ severity = issue.get("severity", "low")
625
+ penalty = severity_penalties.get(severity, 1)
626
+ score -= penalty
627
+
628
+ return max(0, score)
629
+
630
+ def _generate_optimizations(self, issues: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
631
+ """Generate optimization recommendations based on issues."""
632
+ optimizations = []
633
+ categories = set()
634
+
635
+ for issue in issues:
636
+ category = issue.get("category", "unknown")
637
+ categories.add(category)
638
+
639
+ if "database" in categories:
640
+ optimizations.append({
641
+ "type": "database_optimization",
642
+ "description": "Consider adding database indexes and optimizing queries",
643
+ "priority": "high",
644
+ })
645
+
646
+ if "algorithm" in categories:
647
+ optimizations.append({
648
+ "type": "algorithm_optimization",
649
+ "description": "Review algorithm complexity and consider more efficient approaches",
650
+ "priority": "high",
651
+ })
652
+
653
+ if "memory" in categories:
654
+ optimizations.append({
655
+ "type": "memory_optimization",
656
+ "description": "Optimize memory usage by using generators and avoiding large allocations",
657
+ "priority": "medium",
658
+ })
659
+
660
+ if "io" in categories:
661
+ optimizations.append({
662
+ "type": "io_optimization",
663
+ "description": "Use asynchronous I/O operations to improve responsiveness",
664
+ "priority": "medium",
665
+ })
666
+
667
+ return optimizations
668
+
669
+ def _generate_performance_summary(self, results: Dict[str, Any]) -> Dict[str, Any]:
670
+ """Generate summary of performance analysis."""
671
+ return {
672
+ "files_analyzed": results["files_analyzed"],
673
+ "total_issues": results["total_issues"],
674
+ "average_score": results["performance_score"],
675
+ "top_categories": sorted(
676
+ results["issues_by_category"].items(),
677
+ key=lambda x: x[1],
678
+ reverse=True
679
+ )[:3],
680
+ }
681
+
682
+ def _generate_directory_recommendations(self, results: Dict[str, Any]) -> List[str]:
683
+ """Generate recommendations for directory-level performance."""
684
+ recommendations = []
685
+
686
+ issues_by_category = results.get("issues_by_category", {})
687
+
688
+ if issues_by_category.get("database", 0) > 5:
689
+ recommendations.append(
690
+ "Multiple database performance issues detected. Consider implementing query optimization and caching strategies."
691
+ )
692
+
693
+ if issues_by_category.get("algorithm", 0) > 10:
694
+ recommendations.append(
695
+ "High number of algorithmic complexity issues. Review and optimize critical code paths."
696
+ )
697
+
698
+ if issues_by_category.get("memory", 0) > 5:
699
+ recommendations.append(
700
+ "Memory usage issues detected. Implement memory profiling and optimize data structures."
701
+ )
702
+
703
+ if results["performance_score"] < 50:
704
+ recommendations.append(
705
+ "Overall performance score is low. Consider conducting a comprehensive performance audit."
706
+ )
707
+
708
+ if not recommendations:
709
+ recommendations.append(
710
+ "Performance is generally good. Continue monitoring for regressions."
711
+ )
712
+
713
+ return recommendations
714
+
715
def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
    """Flatten an analysis result into a metrics dict.

    Returns {} for non-success results. File results yield score/counts
    plus one "complexity_<func>" entry per analyzed function; directory
    results yield aggregate counts plus one "category_<name>" entry per
    issue category.
    """
    if analysis_result.get("status") != "success":
        return {}

    metrics: Dict[str, Any] = {}
    result_type = analysis_result.get("type")

    if result_type == "file":
        metrics["performance_score"] = analysis_result.get("performance_score", 0)
        metrics["issue_count"] = len(analysis_result.get("issues", []))
        metrics["optimization_count"] = len(analysis_result.get("optimizations", []))
        # One entry per function whose complexity was estimated.
        for func_name, complexity in analysis_result.get("complexity", {}).items():
            metrics[f"complexity_{func_name}"] = complexity.get("time_complexity", "O(1)")

    elif result_type == "directory":
        metrics["files_analyzed"] = analysis_result.get("files_analyzed", 0)
        metrics["total_issues"] = analysis_result.get("total_issues", 0)
        metrics["average_score"] = analysis_result.get("performance_score", 0)
        # Per-category breakdown of issue counts.
        for category, count in analysis_result.get("issues_by_category", {}).items():
            metrics[f"category_{category}"] = count

    return metrics
746
+
747
def compare_results(
    self, baseline: Dict[str, Any], current: Dict[str, Any]
) -> Dict[str, Any]:
    """Diff two performance analysis results.

    Reports the score delta, per-category issue-count changes, and
    plain-English improvement/regression notes (score swings beyond
    +/-10 points, and any change in total issue count).
    """
    report = {
        "score_change": 0,
        "issue_changes": {},
        "improvements": [],
        "regressions": [],
    }

    # Score delta (missing scores default to a clean 100).
    old_score = baseline.get("performance_score", 100)
    new_score = current.get("performance_score", 100)
    report["score_change"] = new_score - old_score

    # Issue counts via flattened metrics; "total_issues" covers directory
    # results, "issue_count" covers single-file results.
    old_metrics = self.extract_metrics(baseline)
    new_metrics = self.extract_metrics(current)
    old_issues = old_metrics.get("total_issues", old_metrics.get("issue_count", 0))
    new_issues = new_metrics.get("total_issues", new_metrics.get("issue_count", 0))

    if new_issues < old_issues:
        report["improvements"].append(
            f"Reduced performance issues from {old_issues} to {new_issues}"
        )
    elif new_issues > old_issues:
        report["regressions"].append(
            f"Performance issues increased from {old_issues} to {new_issues}"
        )

    # Per-category deltas, for categories present in both result sets.
    for key, old_count in old_metrics.items():
        if not key.startswith("category_") or key not in new_metrics:
            continue
        new_count = new_metrics[key]
        if old_count != new_count:
            report["issue_changes"][key.replace("category_", "")] = {
                "baseline": old_count,
                "current": new_count,
                "change": new_count - old_count,
            }

    # Flag large score swings in either direction.
    delta = report["score_change"]
    if delta > 10:
        report["improvements"].append(
            f"Significant performance improvement: +{delta:.1f} points"
        )
    elif delta < -10:
        report["regressions"].append(
            f"Significant performance regression: {delta:.1f} points"
        )

    return report