claude-mpm 4.4.3__py3-none-any.whl → 4.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/agent_loader.py +3 -2
  3. claude_mpm/agents/agent_loader_integration.py +2 -1
  4. claude_mpm/agents/async_agent_loader.py +2 -2
  5. claude_mpm/agents/base_agent_loader.py +2 -2
  6. claude_mpm/agents/frontmatter_validator.py +1 -0
  7. claude_mpm/agents/system_agent_config.py +2 -1
  8. claude_mpm/cli/commands/doctor.py +44 -5
  9. claude_mpm/cli/commands/mpm_init.py +116 -62
  10. claude_mpm/cli/parsers/configure_parser.py +3 -1
  11. claude_mpm/cli/startup_logging.py +1 -3
  12. claude_mpm/config/agent_config.py +1 -1
  13. claude_mpm/config/paths.py +2 -1
  14. claude_mpm/core/agent_name_normalizer.py +1 -0
  15. claude_mpm/core/config.py +2 -1
  16. claude_mpm/core/config_aliases.py +2 -1
  17. claude_mpm/core/file_utils.py +0 -1
  18. claude_mpm/core/framework/__init__.py +6 -6
  19. claude_mpm/core/framework/formatters/__init__.py +2 -2
  20. claude_mpm/core/framework/formatters/capability_generator.py +19 -8
  21. claude_mpm/core/framework/formatters/content_formatter.py +8 -3
  22. claude_mpm/core/framework/formatters/context_generator.py +7 -3
  23. claude_mpm/core/framework/loaders/__init__.py +3 -3
  24. claude_mpm/core/framework/loaders/agent_loader.py +7 -3
  25. claude_mpm/core/framework/loaders/file_loader.py +16 -6
  26. claude_mpm/core/framework/loaders/instruction_loader.py +16 -6
  27. claude_mpm/core/framework/loaders/packaged_loader.py +36 -12
  28. claude_mpm/core/framework/processors/__init__.py +2 -2
  29. claude_mpm/core/framework/processors/memory_processor.py +14 -6
  30. claude_mpm/core/framework/processors/metadata_processor.py +5 -5
  31. claude_mpm/core/framework/processors/template_processor.py +12 -6
  32. claude_mpm/core/framework_loader.py +44 -20
  33. claude_mpm/core/log_manager.py +2 -1
  34. claude_mpm/core/tool_access_control.py +1 -0
  35. claude_mpm/core/unified_agent_registry.py +2 -1
  36. claude_mpm/core/unified_paths.py +1 -0
  37. claude_mpm/experimental/cli_enhancements.py +1 -0
  38. claude_mpm/hooks/base_hook.py +1 -0
  39. claude_mpm/hooks/instruction_reinforcement.py +1 -0
  40. claude_mpm/hooks/kuzu_memory_hook.py +20 -13
  41. claude_mpm/hooks/validation_hooks.py +1 -1
  42. claude_mpm/scripts/mpm_doctor.py +1 -0
  43. claude_mpm/services/agents/loading/agent_profile_loader.py +1 -1
  44. claude_mpm/services/agents/loading/base_agent_manager.py +1 -1
  45. claude_mpm/services/agents/loading/framework_agent_loader.py +1 -1
  46. claude_mpm/services/agents/management/agent_capabilities_generator.py +1 -0
  47. claude_mpm/services/agents/management/agent_management_service.py +1 -1
  48. claude_mpm/services/agents/memory/memory_categorization_service.py +0 -1
  49. claude_mpm/services/agents/memory/memory_file_service.py +6 -2
  50. claude_mpm/services/agents/memory/memory_format_service.py +0 -1
  51. claude_mpm/services/agents/registry/deployed_agent_discovery.py +1 -1
  52. claude_mpm/services/async_session_logger.py +1 -1
  53. claude_mpm/services/claude_session_logger.py +1 -0
  54. claude_mpm/services/core/path_resolver.py +1 -0
  55. claude_mpm/services/diagnostics/checks/__init__.py +2 -0
  56. claude_mpm/services/diagnostics/checks/installation_check.py +126 -25
  57. claude_mpm/services/diagnostics/checks/mcp_services_check.py +451 -0
  58. claude_mpm/services/diagnostics/diagnostic_runner.py +3 -0
  59. claude_mpm/services/diagnostics/doctor_reporter.py +259 -32
  60. claude_mpm/services/event_bus/direct_relay.py +2 -1
  61. claude_mpm/services/event_bus/event_bus.py +1 -0
  62. claude_mpm/services/event_bus/relay.py +3 -2
  63. claude_mpm/services/framework_claude_md_generator/content_assembler.py +1 -1
  64. claude_mpm/services/infrastructure/daemon_manager.py +1 -1
  65. claude_mpm/services/mcp_config_manager.py +301 -54
  66. claude_mpm/services/mcp_gateway/core/process_pool.py +62 -23
  67. claude_mpm/services/mcp_gateway/tools/__init__.py +6 -5
  68. claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +3 -1
  69. claude_mpm/services/mcp_gateway/tools/kuzu_memory_service.py +16 -31
  70. claude_mpm/services/memory/cache/simple_cache.py +1 -1
  71. claude_mpm/services/project/archive_manager.py +159 -96
  72. claude_mpm/services/project/documentation_manager.py +64 -45
  73. claude_mpm/services/project/enhanced_analyzer.py +132 -89
  74. claude_mpm/services/project/project_organizer.py +225 -131
  75. claude_mpm/services/response_tracker.py +1 -1
  76. claude_mpm/services/socketio/server/eventbus_integration.py +1 -1
  77. claude_mpm/services/unified/__init__.py +1 -1
  78. claude_mpm/services/unified/analyzer_strategies/__init__.py +3 -3
  79. claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +97 -53
  80. claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +81 -40
  81. claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +277 -178
  82. claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +196 -112
  83. claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +83 -49
  84. claude_mpm/services/unified/config_strategies/__init__.py +111 -126
  85. claude_mpm/services/unified/config_strategies/config_schema.py +157 -111
  86. claude_mpm/services/unified/config_strategies/context_strategy.py +91 -89
  87. claude_mpm/services/unified/config_strategies/error_handling_strategy.py +183 -173
  88. claude_mpm/services/unified/config_strategies/file_loader_strategy.py +160 -152
  89. claude_mpm/services/unified/config_strategies/unified_config_service.py +124 -112
  90. claude_mpm/services/unified/config_strategies/validation_strategy.py +298 -259
  91. claude_mpm/services/unified/deployment_strategies/__init__.py +7 -7
  92. claude_mpm/services/unified/deployment_strategies/base.py +24 -28
  93. claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +168 -88
  94. claude_mpm/services/unified/deployment_strategies/local.py +49 -34
  95. claude_mpm/services/unified/deployment_strategies/utils.py +39 -43
  96. claude_mpm/services/unified/deployment_strategies/vercel.py +30 -24
  97. claude_mpm/services/unified/interfaces.py +0 -26
  98. claude_mpm/services/unified/migration.py +17 -40
  99. claude_mpm/services/unified/strategies.py +9 -26
  100. claude_mpm/services/unified/unified_analyzer.py +48 -44
  101. claude_mpm/services/unified/unified_config.py +21 -19
  102. claude_mpm/services/unified/unified_deployment.py +21 -26
  103. claude_mpm/storage/state_storage.py +1 -0
  104. claude_mpm/utils/agent_dependency_loader.py +18 -6
  105. claude_mpm/utils/common.py +14 -12
  106. claude_mpm/utils/database_connector.py +15 -12
  107. claude_mpm/utils/error_handler.py +1 -0
  108. claude_mpm/utils/log_cleanup.py +1 -0
  109. claude_mpm/utils/path_operations.py +1 -0
  110. claude_mpm/utils/session_logging.py +1 -1
  111. claude_mpm/utils/subprocess_utils.py +1 -0
  112. claude_mpm/validation/agent_validator.py +1 -1
  113. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/METADATA +35 -15
  114. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/RECORD +118 -117
  115. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/WHEEL +0 -0
  116. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/entry_points.txt +0 -0
  117. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/licenses/LICENSE +0 -0
  118. {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.5.dist-info}/top_level.txt +0 -0
claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py
@@ -12,11 +12,16 @@ Created: 2025-01-26
  import ast
  import re
  from pathlib import Path
- from typing import Any, Dict, List, Optional, Set, Tuple
+ from typing import Any, Dict, List, Optional

  from claude_mpm.core.logging_utils import get_logger

- from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority
+ from ..strategies import (
+     AnalyzerStrategy,
+     StrategyContext,
+     StrategyMetadata,
+     StrategyPriority,
+ )

  logger = get_logger(__name__)

@@ -37,8 +42,8 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
      PERFORMANCE_PATTERNS = {
          "n_plus_one_query": {
              "patterns": [
-                 r'for .* in .*:\s*\n.*\.(get|filter|select|find)',
-                 r'\.map\s*\([^)]*=>\s*[^)]*fetch',
+                 r"for .* in .*:\s*\n.*\.(get|filter|select|find)",
+                 r"\.map\s*\([^)]*=>\s*[^)]*fetch",
              ],
              "severity": "high",
              "description": "Potential N+1 query problem",
@@ -46,8 +51,8 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          },
          "unnecessary_loop": {
              "patterns": [
-                 r'for .* in .*:\s*\n\s*for .* in .*:\s*\n\s*for .* in .*:',
-                 r'\.forEach\s*\([^)]*\)\s*{\s*[^}]*\.forEach',
+                 r"for .* in .*:\s*\n\s*for .* in .*:\s*\n\s*for .* in .*:",
+                 r"\.forEach\s*\([^)]*\)\s*{\s*[^}]*\.forEach",
              ],
              "severity": "medium",
              "description": "Triple nested loop detected",
@@ -56,7 +61,7 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          "string_concatenation_loop": {
              "patterns": [
                  r'for .* in .*:\s*\n.*\+=\s*["\']',
-                 r'\.forEach\s*\([^)]*\)\s*{\s*[^}]*\+=',
+                 r"\.forEach\s*\([^)]*\)\s*{\s*[^}]*\+=",
              ],
              "severity": "medium",
              "description": "String concatenation in loop",
@@ -64,9 +69,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          },
          "synchronous_io": {
              "patterns": [
-                 r'open\s*\([^)]*\)\.read\s*\(',
-                 r'fs\.readFileSync\s*\(',
-                 r'requests\.get\s*\([^)]*\)\.text',
+                 r"open\s*\([^)]*\)\.read\s*\(",
+                 r"fs\.readFileSync\s*\(",
+                 r"requests\.get\s*\([^)]*\)\.text",
              ],
              "severity": "medium",
              "description": "Synchronous I/O operation",
@@ -74,8 +79,8 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          },
          "missing_index": {
              "patterns": [
-                 r'SELECT .* FROM .* WHERE .* LIKE .*%',
-                 r'\.find\s*\(\s*{\s*[^}]*:\s*{\s*\$regex',
+                 r"SELECT .* FROM .* WHERE .* LIKE .*%",
+                 r"\.find\s*\(\s*{\s*[^}]*:\s*{\s*\$regex",
              ],
              "severity": "high",
              "description": "Potentially unindexed database query",
@@ -96,24 +101,24 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
      MEMORY_PATTERNS = {
          "memory_leak": {
              "patterns": [
-                 r'global\s+\w+\s*=',
-                 r'window\.\w+\s*=',
-                 r'self\.\w+\s*=\s*\[\]',
+                 r"global\s+\w+\s*=",
+                 r"window\.\w+\s*=",
+                 r"self\.\w+\s*=\s*\[\]",
              ],
              "description": "Potential memory leak from global variable",
          },
          "large_data_structure": {
              "patterns": [
-                 r'\[\s*\*\s*range\s*\(\s*\d{6,}',
-                 r'Array\s*\(\s*\d{6,}\s*\)',
+                 r"\[\s*\*\s*range\s*\(\s*\d{6,}",
+                 r"Array\s*\(\s*\d{6,}\s*\)",
              ],
              "description": "Large data structure allocation",
          },
          "inefficient_copy": {
              "patterns": [
-                 r'deepcopy\s*\(',
-                 r'JSON\.parse\s*\(\s*JSON\.stringify',
-                 r'\.slice\s*\(\s*\)\.map',
+                 r"deepcopy\s*\(",
+                 r"JSON\.parse\s*\(\s*JSON\.stringify",
+                 r"\.slice\s*\(\s*\)\.map",
              ],
              "description": "Inefficient data copying",
          },
@@ -177,7 +182,7 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          if target_path.is_file():
              return self._analyze_file(target_path, options)
-         elif target_path.is_dir():
+         if target_path.is_dir():
              return self._analyze_directory(target_path, options)
          elif isinstance(target, ast.AST):
              return self._analyze_ast_performance(target, options)
@@ -215,7 +220,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
                  python_analysis = self._analyze_python_performance(content, file_path)
                  results["complexity"] = python_analysis.get("complexity", {})
                  results["issues"].extend(python_analysis.get("issues", []))
-                 results["optimizations"].extend(python_analysis.get("optimizations", []))
+                 results["optimizations"].extend(
+                     python_analysis.get("optimizations", [])
+                 )

              elif file_path.suffix in [".js", ".jsx", ".ts", ".tsx"]:
                  js_analysis = self._analyze_javascript_performance(content, file_path)
@@ -227,7 +234,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

              # Generate optimization recommendations
              if not results["optimizations"]:
-                 results["optimizations"] = self._generate_optimizations(results["issues"])
+                 results["optimizations"] = self._generate_optimizations(
+                     results["issues"]
+                 )

          except Exception as e:
              logger.error(f"Error analyzing file {file_path}: {e}")
@@ -236,7 +245,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          return results

-     def _analyze_directory(self, dir_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
+     def _analyze_directory(
+         self, dir_path: Path, options: Dict[str, Any]
+     ) -> Dict[str, Any]:
          """Analyze all files in a directory for performance issues."""
          results = {
              "status": "success",
@@ -251,8 +262,18 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          # Define file extensions to analyze
          analyzable_extensions = {
-             ".py", ".js", ".jsx", ".ts", ".tsx", ".java", ".cs",
-             ".go", ".rs", ".cpp", ".c", ".sql",
+             ".py",
+             ".js",
+             ".jsx",
+             ".ts",
+             ".tsx",
+             ".java",
+             ".cs",
+             ".go",
+             ".rs",
+             ".cpp",
+             ".c",
+             ".sql",
          }

          files_with_issues = []
@@ -264,7 +285,10 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              # Skip common ignore patterns
              if any(part.startswith(".") for part in file_path.parts):
                  continue
-             if any(ignore in file_path.parts for ignore in ["node_modules", "__pycache__", "dist", "build"]):
+             if any(
+                 ignore in file_path.parts
+                 for ignore in ["node_modules", "__pycache__", "dist", "build"]
+             ):
                  continue

              file_result = self._analyze_file(file_path, options)
@@ -279,8 +303,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              # Categorize issues
              for issue in file_result["issues"]:
                  category = issue.get("category", "unknown")
-                 results["issues_by_category"][category] = \
+                 results["issues_by_category"][category] = (
                      results["issues_by_category"].get(category, 0) + 1
+                 )

          # Calculate average performance score
          if results["files_analyzed"] > 0:
@@ -292,7 +317,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          return results

-     def _scan_performance_patterns(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+     def _scan_performance_patterns(
+         self, content: str, file_path: Path
+     ) -> List[Dict[str, Any]]:
          """Scan for performance anti-patterns."""
          issues = []

@@ -300,21 +327,25 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          for pattern in pattern_info["patterns"]:
              matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
              for match in matches:
-                 line_num = content[:match.start()].count("\n") + 1
-
-                 issues.append({
-                     "type": pattern_name,
-                     "severity": pattern_info["severity"],
-                     "category": pattern_info["category"],
-                     "description": pattern_info["description"],
-                     "file": str(file_path),
-                     "line": line_num,
-                     "code": match.group(0)[:100],
-                 })
+                 line_num = content[: match.start()].count("\n") + 1
+
+                 issues.append(
+                     {
+                         "type": pattern_name,
+                         "severity": pattern_info["severity"],
+                         "category": pattern_info["category"],
+                         "description": pattern_info["description"],
+                         "file": str(file_path),
+                         "line": line_num,
+                         "code": match.group(0)[:100],
+                     }
+                 )

          return issues

-     def _scan_memory_patterns(self, content: str, file_path: Path) -> List[Dict[str, Any]]:
+     def _scan_memory_patterns(
+         self, content: str, file_path: Path
+     ) -> List[Dict[str, Any]]:
          """Scan for memory usage issues."""
          issues = []

@@ -322,21 +353,25 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          for pattern in pattern_info["patterns"]:
              matches = re.finditer(pattern, content, re.IGNORECASE | re.MULTILINE)
              for match in matches:
-                 line_num = content[:match.start()].count("\n") + 1
+                 line_num = content[: match.start()].count("\n") + 1

-                 issues.append({
-                     "type": f"memory_{pattern_name}",
-                     "severity": "medium",
-                     "category": "memory",
-                     "description": pattern_info["description"],
-                     "file": str(file_path),
-                     "line": line_num,
-                     "code": match.group(0),
-                 })
+                 issues.append(
+                     {
+                         "type": f"memory_{pattern_name}",
+                         "severity": "medium",
+                         "category": "memory",
+                         "description": pattern_info["description"],
+                         "file": str(file_path),
+                         "line": line_num,
+                         "code": match.group(0),
+                     }
+                 )

          return issues

-     def _analyze_python_performance(self, content: str, file_path: Path) -> Dict[str, Any]:
+     def _analyze_python_performance(
+         self, content: str, file_path: Path
+     ) -> Dict[str, Any]:
          """Perform Python-specific performance analysis."""
          results = {
              "complexity": {},
@@ -366,15 +401,17 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

                  # Check for performance issues
                  if complexity["time_complexity"] in ["O(n^2)", "O(n^3)", "O(2^n)"]:
-                     results["issues"].append({
-                         "type": "high_complexity",
-                         "severity": "high",
-                         "category": "algorithm",
-                         "description": f"Function '{node.name}' has {complexity['time_complexity']} complexity",
-                         "file": str(file_path),
-                         "line": node.lineno,
-                         "code": node.name,
-                     })
+                     results["issues"].append(
+                         {
+                             "type": "high_complexity",
+                             "severity": "high",
+                             "category": "algorithm",
+                             "description": f"Function '{node.name}' has {complexity['time_complexity']} complexity",
+                             "file": str(file_path),
+                             "line": node.lineno,
+                             "code": node.name,
+                         }
+                     )

                  self.generic_visit(node)
                  self.current_function = old_function
@@ -383,15 +420,17 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              def visit_For(self, node):
                  self.loop_depth += 1
                  if self.loop_depth > 2:
-                     results["issues"].append({
-                         "type": "deep_nesting",
-                         "severity": "medium",
-                         "category": "algorithm",
-                         "description": f"Deep loop nesting (level {self.loop_depth})",
-                         "file": str(file_path),
-                         "line": node.lineno,
-                         "code": f"Loop depth: {self.loop_depth}",
-                     })
+                     results["issues"].append(
+                         {
+                             "type": "deep_nesting",
+                             "severity": "medium",
+                             "category": "algorithm",
+                             "description": f"Deep loop nesting (level {self.loop_depth})",
+                             "file": str(file_path),
+                             "line": node.lineno,
+                             "code": f"Loop depth: {self.loop_depth}",
+                         }
+                     )
                  self.generic_visit(node)
                  self.loop_depth -= 1

@@ -400,24 +439,31 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

              def visit_ListComp(self, node):
                  # Check for nested list comprehensions
-                 nested_comps = sum(
-                     1 for child in ast.walk(node)
-                     if isinstance(child, (ast.ListComp, ast.SetComp, ast.DictComp))
-                 ) - 1
+                 nested_comps = (
+                     sum(
+                         1
+                         for child in ast.walk(node)
+                         if isinstance(
+                             child, (ast.ListComp, ast.SetComp, ast.DictComp)
+                         )
+                     )
+                     - 1
+                 )

                  if nested_comps > 1:
-                     results["optimizations"].append({
-                         "type": "nested_comprehension",
-                         "description": "Consider breaking down nested comprehensions for clarity",
-                         "file": str(file_path),
-                         "line": node.lineno,
-                     })
+                     results["optimizations"].append(
+                         {
+                             "type": "nested_comprehension",
+                             "description": "Consider breaking down nested comprehensions for clarity",
+                             "file": str(file_path),
+                             "line": node.lineno,
+                         }
+                     )

              def _calculate_complexity(self, node):
                  """Calculate time and space complexity of a function."""
                  loop_count = sum(
-                     1 for n in ast.walk(node)
-                     if isinstance(n, (ast.For, ast.While))
+                     1 for n in ast.walk(node) if isinstance(n, (ast.For, ast.While))
                  )

                  # Detect nested loops
@@ -467,26 +513,35 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          return results

-     def _check_python_optimizations(self, tree: ast.AST, results: Dict, file_path: Path):
+     def _check_python_optimizations(
+         self, tree: ast.AST, results: Dict, file_path: Path
+     ):
          """Check for Python-specific optimization opportunities."""
+
          class OptimizationVisitor(ast.NodeVisitor):
              def visit_For(self, node):
                  # Check for range(len()) anti-pattern
                  if isinstance(node.iter, ast.Call):
-                     if (isinstance(node.iter.func, ast.Name) and
-                         node.iter.func.id == "range" and
-                         len(node.iter.args) == 1):
+                     if (
+                         isinstance(node.iter.func, ast.Name)
+                         and node.iter.func.id == "range"
+                         and len(node.iter.args) == 1
+                     ):

                          if isinstance(node.iter.args[0], ast.Call):
-                             if (isinstance(node.iter.args[0].func, ast.Name) and
-                                 node.iter.args[0].func.id == "len"):
-
-                                 results["optimizations"].append({
-                                     "type": "range_len_pattern",
-                                     "description": "Use enumerate() instead of range(len())",
-                                     "file": str(file_path),
-                                     "line": node.lineno,
-                                 })
+                             if (
+                                 isinstance(node.iter.args[0].func, ast.Name)
+                                 and node.iter.args[0].func.id == "len"
+                             ):
+
+                                 results["optimizations"].append(
+                                     {
+                                         "type": "range_len_pattern",
+                                         "description": "Use enumerate() instead of range(len())",
+                                         "file": str(file_path),
+                                         "line": node.lineno,
+                                     }
+                                 )

                  self.generic_visit(node)

@@ -500,22 +555,31 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
                              break

                  if parent and isinstance(parent, ast.Call):
-                     if (isinstance(parent.func, ast.Name) and
-                         parent.func.id in ["sum", "any", "all", "min", "max"]):
-
-                         results["optimizations"].append({
-                             "type": "generator_opportunity",
-                             "description": "Consider using generator expression instead of list comprehension",
-                             "file": str(file_path),
-                             "line": node.lineno,
-                         })
+                     if isinstance(parent.func, ast.Name) and parent.func.id in [
+                         "sum",
+                         "any",
+                         "all",
+                         "min",
+                         "max",
+                     ]:
+
+                         results["optimizations"].append(
+                             {
+                                 "type": "generator_opportunity",
+                                 "description": "Consider using generator expression instead of list comprehension",
+                                 "file": str(file_path),
+                                 "line": node.lineno,
+                             }
+                         )

                  self.generic_visit(node)

          visitor = OptimizationVisitor()
          visitor.visit(tree)

-     def _analyze_javascript_performance(self, content: str, file_path: Path) -> Dict[str, Any]:
+     def _analyze_javascript_performance(
+         self, content: str, file_path: Path
+     ) -> Dict[str, Any]:
          """Perform JavaScript-specific performance analysis."""
          results = {
              "issues": [],
@@ -525,7 +589,7 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          # Check for common JS performance issues
          js_patterns = {
              "dom_in_loop": {
-                 "pattern": r'for\s*\([^)]*\)\s*{\s*[^}]*document\.(getElementById|querySelector)',
+                 "pattern": r"for\s*\([^)]*\)\s*{\s*[^}]*document\.(getElementById|querySelector)",
                  "description": "DOM access inside loop - consider caching",
                  "severity": "high",
              },
@@ -535,50 +599,60 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
                  "severity": "medium",
              },
              "sync_ajax": {
-                 "pattern": r'async\s*:\s*false',
+                 "pattern": r"async\s*:\s*false",
                  "description": "Synchronous AJAX request",
                  "severity": "high",
              },
              "inefficient_array": {
-                 "pattern": r'\.shift\s*\(\s*\)',
+                 "pattern": r"\.shift\s*\(\s*\)",
                  "description": "Array.shift() is O(n) - consider using different data structure",
                  "severity": "medium",
              },
          }

          for issue_name, issue_info in js_patterns.items():
-             matches = re.finditer(issue_info["pattern"], content, re.IGNORECASE | re.MULTILINE)
+             matches = re.finditer(
+                 issue_info["pattern"], content, re.IGNORECASE | re.MULTILINE
+             )
              for match in matches:
-                 line_num = content[:match.start()].count("\n") + 1
-
-                 results["issues"].append({
-                     "type": f"js_{issue_name}",
-                     "severity": issue_info["severity"],
-                     "category": "performance",
-                     "description": issue_info["description"],
-                     "file": str(file_path),
-                     "line": line_num,
-                     "code": match.group(0)[:100],
-                 })
+                 line_num = content[: match.start()].count("\n") + 1
+
+                 results["issues"].append(
+                     {
+                         "type": f"js_{issue_name}",
+                         "severity": issue_info["severity"],
+                         "category": "performance",
+                         "description": issue_info["description"],
+                         "file": str(file_path),
+                         "line": line_num,
+                         "code": match.group(0)[:100],
+                     }
+                 )

          # Check for optimization opportunities
          if "forEach" in content and "return" not in content:
-             results["optimizations"].append({
-                 "type": "use_for_of",
-                 "description": "Consider using for...of instead of forEach for better performance",
-                 "file": str(file_path),
-             })
+             results["optimizations"].append(
+                 {
+                     "type": "use_for_of",
+                     "description": "Consider using for...of instead of forEach for better performance",
+                     "file": str(file_path),
+                 }
+             )

          if ".map(" in content and ".filter(" in content:
-             results["optimizations"].append({
-                 "type": "combine_array_methods",
-                 "description": "Consider combining map and filter operations for better performance",
-                 "file": str(file_path),
-             })
+             results["optimizations"].append(
+                 {
+                     "type": "combine_array_methods",
+                     "description": "Consider combining map and filter operations for better performance",
+                     "file": str(file_path),
+                 }
+             )

          return results

-     def _analyze_ast_performance(self, node: ast.AST, options: Dict[str, Any]) -> Dict[str, Any]:
+     def _analyze_ast_performance(
+         self, node: ast.AST, options: Dict[str, Any]
+     ) -> Dict[str, Any]:
          """Analyze performance of an AST node."""
          results = {
              "status": "success",
@@ -590,22 +664,27 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          # Calculate complexity for the node
          if isinstance(node, ast.FunctionDef):
              loop_count = sum(
-                 1 for n in ast.walk(node)
-                 if isinstance(n, (ast.For, ast.While))
+                 1 for n in ast.walk(node) if isinstance(n, (ast.For, ast.While))
              )

              results["complexity"] = {
                  "name": node.name,
                  "loop_count": loop_count,
-                 "line_count": node.end_lineno - node.lineno + 1 if hasattr(node, "end_lineno") else 0,
+                 "line_count": (
+                     node.end_lineno - node.lineno + 1
+                     if hasattr(node, "end_lineno")
+                     else 0
+                 ),
              }

              if loop_count > 3:
-                 results["issues"].append({
-                     "type": "excessive_loops",
-                     "severity": "medium",
-                     "description": f"Function has {loop_count} loops",
-                 })
+                 results["issues"].append(
+                     {
+                         "type": "excessive_loops",
+                         "severity": "medium",
+                         "description": f"Function has {loop_count} loops",
+                     }
+                 )

          return results

@@ -627,7 +706,9 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):

          return max(0, score)

-     def _generate_optimizations(self, issues: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+     def _generate_optimizations(
+         self, issues: List[Dict[str, Any]]
+     ) -> List[Dict[str, Any]]:
          """Generate optimization recommendations based on issues."""
          optimizations = []
          categories = set()
@@ -637,32 +718,40 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              categories.add(category)

          if "database" in categories:
-             optimizations.append({
-                 "type": "database_optimization",
-                 "description": "Consider adding database indexes and optimizing queries",
-                 "priority": "high",
-             })
+             optimizations.append(
+                 {
+                     "type": "database_optimization",
+                     "description": "Consider adding database indexes and optimizing queries",
+                     "priority": "high",
+                 }
+             )

          if "algorithm" in categories:
-             optimizations.append({
-                 "type": "algorithm_optimization",
-                 "description": "Review algorithm complexity and consider more efficient approaches",
-                 "priority": "high",
-             })
+             optimizations.append(
+                 {
+                     "type": "algorithm_optimization",
+                     "description": "Review algorithm complexity and consider more efficient approaches",
+                     "priority": "high",
+                 }
+             )

          if "memory" in categories:
-             optimizations.append({
-                 "type": "memory_optimization",
-                 "description": "Optimize memory usage by using generators and avoiding large allocations",
-                 "priority": "medium",
-             })
+             optimizations.append(
+                 {
+                     "type": "memory_optimization",
+                     "description": "Optimize memory usage by using generators and avoiding large allocations",
+                     "priority": "medium",
+                 }
+             )

          if "io" in categories:
-             optimizations.append({
-                 "type": "io_optimization",
-                 "description": "Use asynchronous I/O operations to improve responsiveness",
-                 "priority": "medium",
-             })
+             optimizations.append(
+                 {
+                     "type": "io_optimization",
+                     "description": "Use asynchronous I/O operations to improve responsiveness",
+                     "priority": "medium",
+                 }
+             )

          return optimizations

@@ -673,9 +762,7 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              "total_issues": results["total_issues"],
              "average_score": results["performance_score"],
              "top_categories": sorted(
-                 results["issues_by_category"].items(),
-                 key=lambda x: x[1],
-                 reverse=True
+                 results["issues_by_category"].items(), key=lambda x: x[1], reverse=True
              )[:3],
          }

@@ -720,26 +807,34 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
              return metrics

          if analysis_result.get("type") == "file":
-             metrics.update({
-                 "performance_score": analysis_result.get("performance_score", 0),
-                 "issue_count": len(analysis_result.get("issues", [])),
-                 "optimization_count": len(analysis_result.get("optimizations", [])),
-             })
+             metrics.update(
+                 {
+                     "performance_score": analysis_result.get("performance_score", 0),
+                     "issue_count": len(analysis_result.get("issues", [])),
+                     "optimization_count": len(analysis_result.get("optimizations", [])),
+                 }
+             )

              # Add complexity metrics if available
              if "complexity" in analysis_result:
                  for func_name, complexity in analysis_result["complexity"].items():
-                     metrics[f"complexity_{func_name}"] = complexity.get("time_complexity", "O(1)")
+                     metrics[f"complexity_{func_name}"] = complexity.get(
+                         "time_complexity", "O(1)"
+                     )

          elif analysis_result.get("type") == "directory":
-             metrics.update({
-                 "files_analyzed": analysis_result.get("files_analyzed", 0),
-                 "total_issues": analysis_result.get("total_issues", 0),
-                 "average_score": analysis_result.get("performance_score", 0),
-             })
+             metrics.update(
+                 {
+                     "files_analyzed": analysis_result.get("files_analyzed", 0),
+                     "total_issues": analysis_result.get("total_issues", 0),
+                     "average_score": analysis_result.get("performance_score", 0),
+                 }
+             )

              # Add category breakdown
-             for category, count in analysis_result.get("issues_by_category", {}).items():
+             for category, count in analysis_result.get(
+                 "issues_by_category", {}
+             ).items():
                  metrics[f"category_{category}"] = count

          return metrics
@@ -764,8 +859,12 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
          baseline_metrics = self.extract_metrics(baseline)
          current_metrics = self.extract_metrics(current)

-         baseline_issues = baseline_metrics.get("total_issues", baseline_metrics.get("issue_count", 0))
-         current_issues = current_metrics.get("total_issues", current_metrics.get("issue_count", 0))
+         baseline_issues = baseline_metrics.get(
+             "total_issues", baseline_metrics.get("issue_count", 0)
+         )
+         current_issues = current_metrics.get(
+             "total_issues", current_metrics.get("issue_count", 0)
+         )

          if current_issues < baseline_issues:
              comparison["improvements"].append(
@@ -801,4 +900,4 @@ class PerformanceAnalyzerStrategy(AnalyzerStrategy):
                  f"Significant performance regression: {comparison['score_change']:.1f} points"
              )

-         return comparison
\ No newline at end of file
+         return comparison