claude-mpm 4.1.8__py3-none-any.whl → 4.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/INSTRUCTIONS.md +26 -1
  3. claude_mpm/agents/agents_metadata.py +57 -0
  4. claude_mpm/agents/templates/.claude-mpm/memories/README.md +17 -0
  5. claude_mpm/agents/templates/.claude-mpm/memories/engineer_memories.md +3 -0
  6. claude_mpm/agents/templates/agent-manager.json +263 -17
  7. claude_mpm/agents/templates/agentic_coder_optimizer.json +222 -0
  8. claude_mpm/agents/templates/code_analyzer.json +18 -8
  9. claude_mpm/agents/templates/engineer.json +1 -1
  10. claude_mpm/agents/templates/logs/prompts/agent_engineer_20250826_014258_728.md +39 -0
  11. claude_mpm/agents/templates/qa.json +1 -1
  12. claude_mpm/agents/templates/research.json +1 -1
  13. claude_mpm/cli/__init__.py +4 -0
  14. claude_mpm/cli/commands/__init__.py +6 -0
  15. claude_mpm/cli/commands/analyze.py +547 -0
  16. claude_mpm/cli/commands/analyze_code.py +524 -0
  17. claude_mpm/cli/commands/configure.py +77 -28
  18. claude_mpm/cli/commands/configure_tui.py +60 -60
  19. claude_mpm/cli/commands/debug.py +1387 -0
  20. claude_mpm/cli/parsers/analyze_code_parser.py +170 -0
  21. claude_mpm/cli/parsers/analyze_parser.py +135 -0
  22. claude_mpm/cli/parsers/base_parser.py +29 -0
  23. claude_mpm/cli/parsers/debug_parser.py +319 -0
  24. claude_mpm/constants.py +3 -1
  25. claude_mpm/core/framework_loader.py +148 -6
  26. claude_mpm/core/log_manager.py +16 -13
  27. claude_mpm/core/logger.py +1 -1
  28. claude_mpm/core/unified_agent_registry.py +1 -1
  29. claude_mpm/dashboard/.claude-mpm/socketio-instances.json +1 -0
  30. claude_mpm/dashboard/analysis_runner.py +428 -0
  31. claude_mpm/dashboard/static/built/components/activity-tree.js +2 -0
  32. claude_mpm/dashboard/static/built/components/agent-inference.js +1 -1
  33. claude_mpm/dashboard/static/built/components/event-viewer.js +1 -1
  34. claude_mpm/dashboard/static/built/components/file-tool-tracker.js +1 -1
  35. claude_mpm/dashboard/static/built/components/module-viewer.js +1 -1
  36. claude_mpm/dashboard/static/built/components/session-manager.js +1 -1
  37. claude_mpm/dashboard/static/built/components/working-directory.js +1 -1
  38. claude_mpm/dashboard/static/built/dashboard.js +1 -1
  39. claude_mpm/dashboard/static/built/socket-client.js +1 -1
  40. claude_mpm/dashboard/static/css/activity.css +549 -0
  41. claude_mpm/dashboard/static/css/code-tree.css +846 -0
  42. claude_mpm/dashboard/static/css/dashboard.css +245 -0
  43. claude_mpm/dashboard/static/dist/components/activity-tree.js +2 -0
  44. claude_mpm/dashboard/static/dist/components/code-tree.js +2 -0
  45. claude_mpm/dashboard/static/dist/components/code-viewer.js +2 -0
  46. claude_mpm/dashboard/static/dist/components/event-viewer.js +1 -1
  47. claude_mpm/dashboard/static/dist/components/session-manager.js +1 -1
  48. claude_mpm/dashboard/static/dist/components/working-directory.js +1 -1
  49. claude_mpm/dashboard/static/dist/dashboard.js +1 -1
  50. claude_mpm/dashboard/static/dist/socket-client.js +1 -1
  51. claude_mpm/dashboard/static/js/components/activity-tree.js +1139 -0
  52. claude_mpm/dashboard/static/js/components/code-tree.js +1357 -0
  53. claude_mpm/dashboard/static/js/components/code-viewer.js +480 -0
  54. claude_mpm/dashboard/static/js/components/event-viewer.js +11 -0
  55. claude_mpm/dashboard/static/js/components/session-manager.js +40 -4
  56. claude_mpm/dashboard/static/js/components/socket-manager.js +12 -0
  57. claude_mpm/dashboard/static/js/components/ui-state-manager.js +4 -0
  58. claude_mpm/dashboard/static/js/components/working-directory.js +17 -1
  59. claude_mpm/dashboard/static/js/dashboard.js +39 -0
  60. claude_mpm/dashboard/static/js/socket-client.js +414 -20
  61. claude_mpm/dashboard/templates/index.html +184 -4
  62. claude_mpm/hooks/claude_hooks/hook_handler.py +182 -5
  63. claude_mpm/hooks/claude_hooks/installer.py +386 -113
  64. claude_mpm/scripts/claude-hook-handler.sh +161 -0
  65. claude_mpm/scripts/socketio_daemon.py +121 -8
  66. claude_mpm/services/agents/deployment/agent_lifecycle_manager_refactored.py +2 -2
  67. claude_mpm/services/agents/deployment/agent_record_service.py +1 -2
  68. claude_mpm/services/agents/memory/memory_format_service.py +1 -5
  69. claude_mpm/services/cli/agent_cleanup_service.py +1 -2
  70. claude_mpm/services/cli/agent_dependency_service.py +1 -1
  71. claude_mpm/services/cli/agent_validation_service.py +3 -4
  72. claude_mpm/services/cli/dashboard_launcher.py +2 -3
  73. claude_mpm/services/cli/startup_checker.py +0 -10
  74. claude_mpm/services/core/cache_manager.py +1 -2
  75. claude_mpm/services/core/path_resolver.py +1 -4
  76. claude_mpm/services/core/service_container.py +2 -2
  77. claude_mpm/services/diagnostics/checks/instructions_check.py +1 -2
  78. claude_mpm/services/infrastructure/monitoring/__init__.py +11 -11
  79. claude_mpm/services/infrastructure/monitoring.py +11 -11
  80. claude_mpm/services/project/architecture_analyzer.py +1 -1
  81. claude_mpm/services/project/dependency_analyzer.py +4 -4
  82. claude_mpm/services/project/language_analyzer.py +3 -3
  83. claude_mpm/services/project/metrics_collector.py +3 -6
  84. claude_mpm/services/socketio/handlers/__init__.py +2 -0
  85. claude_mpm/services/socketio/handlers/code_analysis.py +170 -0
  86. claude_mpm/services/socketio/handlers/registry.py +2 -0
  87. claude_mpm/services/socketio/server/connection_manager.py +4 -4
  88. claude_mpm/services/socketio/server/core.py +100 -11
  89. claude_mpm/services/socketio/server/main.py +8 -2
  90. claude_mpm/services/visualization/__init__.py +19 -0
  91. claude_mpm/services/visualization/mermaid_generator.py +938 -0
  92. claude_mpm/tools/__main__.py +208 -0
  93. claude_mpm/tools/code_tree_analyzer.py +778 -0
  94. claude_mpm/tools/code_tree_builder.py +632 -0
  95. claude_mpm/tools/code_tree_events.py +318 -0
  96. claude_mpm/tools/socketio_debug.py +671 -0
  97. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/METADATA +1 -1
  98. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/RECORD +102 -73
  99. claude_mpm/agents/schema/agent_schema.json +0 -314
  100. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/WHEEL +0 -0
  101. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/entry_points.txt +0 -0
  102. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/licenses/LICENSE +0 -0
  103. {claude_mpm-4.1.8.dist-info → claude_mpm-4.1.10.dist-info}/top_level.txt +0 -0
claude_mpm/core/framework_loader.py CHANGED
@@ -1,6 +1,10 @@
  """Framework loader for Claude MPM."""

+ import getpass
+ import locale
  import logging
+ import os
+ import platform
  import time
  from datetime import datetime
  from pathlib import Path
@@ -995,11 +999,8 @@ class FrameworkLoader:
          # Add dynamic agent capabilities section
          instructions += self._generate_agent_capabilities_section()

-         # Add current date for temporal awareness
-         instructions += f"\n\n## Temporal Context\n**Today's Date**: {datetime.now().strftime('%Y-%m-%d')}\n"
-         instructions += (
-             "Apply date awareness to all time-sensitive tasks and decisions.\n"
-         )
+         # Add enhanced temporal and user context for better awareness
+         instructions += self._generate_temporal_user_context()

          # Add BASE_PM.md framework requirements AFTER INSTRUCTIONS.md
          if self.framework_content.get("base_pm_instructions"):
@@ -1322,6 +1323,147 @@ Extract tickets from these patterns:
          self._agent_capabilities_cache_time = current_time
          return result

+     def _generate_temporal_user_context(self) -> str:
+         """Generate enhanced temporal and user context for better PM awareness.
+
+         Returns:
+             str: Formatted context string with datetime, user, and system information
+         """
+         context_lines = ["\n\n## Temporal & User Context\n"]
+
+         try:
+             # Get current datetime with timezone awareness
+             now = datetime.now()
+
+             # Try to get timezone info - fallback to UTC offset if timezone name not available
+             try:
+                 import time as time_module
+
+                 if hasattr(time_module, "tzname"):
+                     tz_name = time_module.tzname[time_module.daylight]
+                     tz_offset = time_module.strftime("%z")
+                     if tz_offset:
+                         # Format UTC offset properly (e.g., -0800 to -08:00)
+                         tz_offset = (
+                             f"{tz_offset[:3]}:{tz_offset[3:]}"
+                             if len(tz_offset) >= 4
+                             else tz_offset
+                         )
+                         tz_info = f"{tz_name} (UTC{tz_offset})"
+                     else:
+                         tz_info = tz_name
+                 else:
+                     tz_info = "Local Time"
+             except Exception:
+                 tz_info = "Local Time"
+
+             # Format datetime components
+             date_str = now.strftime("%Y-%m-%d")
+             time_str = now.strftime("%H:%M:%S")
+             day_name = now.strftime("%A")
+
+             context_lines.append(
+                 f"**Current DateTime**: {date_str} {time_str} {tz_info}\n"
+             )
+             context_lines.append(f"**Day**: {day_name}\n")
+
+         except Exception as e:
+             # Fallback to basic date if enhanced datetime fails
+             self.logger.debug(f"Error generating enhanced datetime context: {e}")
+             context_lines.append(
+                 f"**Today's Date**: {datetime.now().strftime('%Y-%m-%d')}\n"
+             )
+
+         try:
+             # Get user information with safe fallbacks
+             username = None
+
+             # Try multiple methods to get username
+             methods = [
+                 lambda: os.environ.get("USER"),
+                 lambda: os.environ.get("USERNAME"),  # Windows fallback
+                 lambda: getpass.getuser(),
+             ]
+
+             for method in methods:
+                 try:
+                     username = method()
+                     if username:
+                         break
+                 except Exception:
+                     continue
+
+             if username:
+                 context_lines.append(f"**User**: {username}\n")
+
+                 # Add home directory if available
+                 try:
+                     home_dir = os.path.expanduser("~")
+                     if home_dir and home_dir != "~":
+                         context_lines.append(f"**Home Directory**: {home_dir}\n")
+                 except Exception:
+                     pass
+
+         except Exception as e:
+             # User detection is optional, don't fail
+             self.logger.debug(f"Could not detect user information: {e}")
+
+         try:
+             # Get system information
+             system_info = platform.system()
+             if system_info:
+                 # Enhance system name for common platforms
+                 system_names = {
+                     "Darwin": "Darwin (macOS)",
+                     "Linux": "Linux",
+                     "Windows": "Windows",
+                 }
+                 system_display = system_names.get(system_info, system_info)
+                 context_lines.append(f"**System**: {system_display}\n")
+
+                 # Add platform version if available
+                 try:
+                     platform_version = platform.release()
+                     if platform_version:
+                         context_lines.append(
+                             f"**System Version**: {platform_version}\n"
+                         )
+                 except Exception:
+                     pass
+
+         except Exception as e:
+             # System info is optional
+             self.logger.debug(f"Could not detect system information: {e}")
+
+         try:
+             # Add current working directory
+             cwd = os.getcwd()
+             if cwd:
+                 context_lines.append(f"**Working Directory**: {cwd}\n")
+         except Exception:
+             pass
+
+         try:
+             # Add locale information if available
+             current_locale = locale.getlocale()
+             if current_locale and current_locale[0]:
+                 context_lines.append(f"**Locale**: {current_locale[0]}\n")
+         except Exception:
+             # Locale is optional
+             pass
+
+         # Add instruction for applying context
+         context_lines.append(
+             "\nApply temporal and user awareness to all tasks, "
+             "decisions, and interactions.\n"
+         )
+         context_lines.append(
+             "Use this context for personalized responses and "
+             "time-sensitive operations.\n"
+         )
+
+         return "".join(context_lines)
+
      def _parse_agent_metadata(self, agent_file: Path) -> Optional[Dict[str, Any]]:
          """Parse agent metadata from deployed agent file.
          Uses caching based on file path and modification time.
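
To make the new context block concrete, here is a minimal standalone sketch that gathers the same kinds of facts (date, day, user, platform, working directory, locale) and renders a similar markdown section. It is an illustration of the approach only, not the shipped method above, and it omits the per-lookup fallback handling that the real implementation wraps around each step.

```python
import getpass
import locale
import os
import platform
from datetime import datetime


def sketch_temporal_user_context() -> str:
    """Rough approximation of the context block assembled above."""
    now = datetime.now()
    lines = ["\n\n## Temporal & User Context\n"]
    lines.append(f"**Current DateTime**: {now:%Y-%m-%d %H:%M:%S}\n")
    lines.append(f"**Day**: {now:%A}\n")
    try:
        lines.append(f"**User**: {getpass.getuser()}\n")
    except Exception:
        pass  # user detection is optional, mirroring the real method
    lines.append(f"**System**: {platform.system()} {platform.release()}\n")
    lines.append(f"**Working Directory**: {os.getcwd()}\n")
    current_locale = locale.getlocale()
    if current_locale and current_locale[0]:
        lines.append(f"**Locale**: {current_locale[0]}\n")
    lines.append("\nApply temporal and user awareness to all tasks.\n")
    return "".join(lines)


print(sketch_temporal_user_context())
```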
@@ -1333,7 +1475,7 @@ Extract tickets from these patterns:
          # Check cache based on file path and modification time
          cache_key = str(agent_file)
          file_mtime = agent_file.stat().st_mtime
-         current_time = time.time()
+         time.time()

          # Try to get from cache first
          cached_result = self._cache_manager.get_agent_metadata(cache_key)
claude_mpm/core/log_manager.py CHANGED
@@ -101,9 +101,7 @@ class LogManager:
          }

          # Base directories
-         self.base_log_dir = Path(
-             logging_config.get("base_directory", ".claude-mpm/logs")
-         )
+         self.base_log_dir = Path(logging_config.get("base_directory", "logs"))
          if not self.base_log_dir.is_absolute():
              self.base_log_dir = Path.cwd() / self.base_log_dir

@@ -352,27 +350,32 @@ class LogManager:
          """
          One-time migration to move existing MPM logs to new subdirectory.

-         Moves mpm_*.log files from .claude-mpm/logs/ to .claude-mpm/logs/mpm/
+         Moves mpm_*.log files from .claude-mpm/logs/ to logs/mpm/
          """
          try:
-             old_location = self.base_log_dir
+             # Check both old possible locations
+             old_locations = [
+                 Path.cwd() / ".claude-mpm" / "logs",  # Old default location
+                 self.base_log_dir,  # Current base location (logs/)
+             ]
              new_location = self.base_log_dir / "mpm"

-             # Only proceed if old location exists and has MPM logs
-             if not old_location.exists():
-                 return
-
-             # Find all MPM log files in the old location
-             mpm_logs = list(old_location.glob("mpm_*.log"))
+             # Collect all MPM logs from all old locations
+             all_mpm_logs = []
+             for old_location in old_locations:
+                 if old_location.exists() and old_location != new_location:
+                     # Find all MPM log files in this location
+                     mpm_logs = list(old_location.glob("mpm_*.log"))
+                     all_mpm_logs.extend(mpm_logs)

-             if not mpm_logs:
+             if not all_mpm_logs:
                  return  # No logs to migrate

              # Ensure new directory exists
              new_location.mkdir(parents=True, exist_ok=True)

              migrated_count = 0
-             for log_file in mpm_logs:
+             for log_file in all_mpm_logs:
                  try:
                      # Move file to new location
                      new_path = new_location / log_file.name
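
Combined with the `base_directory` default change in the earlier hunk, the migration now sweeps `mpm_*.log` files from both the legacy `.claude-mpm/logs/` directory and the configured base directory into `logs/mpm/`. The sketch below reproduces that sweep as a standalone function for clarity; the function name is hypothetical and `shutil.move` stands in for whatever move call the class actually uses.

```python
import shutil
from pathlib import Path


def migrate_mpm_logs(base_log_dir: Path) -> int:
    """Move mpm_*.log files from old locations into <base>/mpm (sketch only)."""
    old_locations = [Path.cwd() / ".claude-mpm" / "logs", base_log_dir]
    new_location = base_log_dir / "mpm"

    # Collect candidates from every old location except the target itself.
    logs = [
        log_file
        for old in old_locations
        if old.exists() and old != new_location
        for log_file in old.glob("mpm_*.log")
    ]
    if not logs:
        return 0  # nothing to migrate

    new_location.mkdir(parents=True, exist_ok=True)
    for log_file in logs:
        shutil.move(str(log_file), new_location / log_file.name)
    return len(logs)


# With the new default, the relative "logs" directory resolves against the CWD.
print(migrate_mpm_logs(Path.cwd() / "logs"), "file(s) migrated")
```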
claude_mpm/core/logger.py CHANGED
@@ -232,7 +232,7 @@ def setup_logging(
      if log_dir is None:
          # Use deployment root for logs to keep everything centralized
          deployment_root = get_project_root()
-         log_dir = deployment_root / ".claude-mpm" / "logs"
+         log_dir = deployment_root / "logs" / "mpm"

      log_dir.mkdir(parents=True, exist_ok=True)

claude_mpm/core/unified_agent_registry.py CHANGED
@@ -455,7 +455,7 @@ class UnifiedAgentRegistry:
          """Apply tier precedence rules to resolve conflicts."""
          # Group agents by their actual name (without tier suffix)
          agent_groups = {}
-         for registry_key, metadata in self.registry.items():
+         for _registry_key, metadata in self.registry.items():
              # Extract the actual agent name (registry_key is "name_tier")
              agent_name = metadata.name  # Use the actual name from metadata
              if agent_name not in agent_groups:
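
Context for the rename above: the loop builds groups keyed by the agent's real name because the registry key itself carries a tier suffix ("name_tier"). A simplified sketch of that grouping, with a hypothetical stand-in for the metadata type:

```python
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class AgentMeta:  # hypothetical stand-in for the registry's metadata objects
    name: str
    tier: str


registry: Dict[str, AgentMeta] = {
    "engineer_project": AgentMeta("engineer", "project"),
    "engineer_system": AgentMeta("engineer", "system"),
}

# Group by the actual agent name; the "name_tier" registry key is not needed,
# which is why the loop variable above is now prefixed with an underscore.
agent_groups: Dict[str, List[AgentMeta]] = {}
for _registry_key, metadata in registry.items():
    agent_groups.setdefault(metadata.name, []).append(metadata)

print({name: [m.tier for m in group] for name, group in agent_groups.items()})
# {'engineer': ['project', 'system']}
```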
claude_mpm/dashboard/analysis_runner.py ADDED
@@ -0,0 +1,428 @@
+ #!/usr/bin/env python3
+ """
+ Code Analysis Runner for Dashboard
+ ===================================
+
+ WHY: Manages subprocess execution of code analysis, streaming results to
+ Socket.IO clients in real-time while handling cancellation and error recovery.
+
+ DESIGN DECISIONS:
+ - Use subprocess for isolation and cancellation support
+ - Stream output line-by-line for real-time updates
+ - Queue multiple analysis requests
+ - Handle process lifecycle management
+ - Convert analyzer events to Socket.IO events
+ """
+
+ import json
+ import os
+ import subprocess
+ import sys
+ import threading
+ from dataclasses import asdict, dataclass
+ from datetime import datetime
+ from pathlib import Path
+ from queue import Queue
+ from typing import Any, Dict, List, Optional
+
+ from ..core.logging_config import get_logger
+
+
+ @dataclass
+ class AnalysisRequest:
+     """Represents a code analysis request."""
+
+     request_id: str
+     path: str
+     languages: Optional[List[str]] = None
+     max_depth: Optional[int] = None
+     ignore_patterns: Optional[List[str]] = None
+     timestamp: datetime = None
+
+     def __post_init__(self):
+         if self.timestamp is None:
+             self.timestamp = datetime.utcnow()
+
+
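
A quick usage sketch for the dataclass above, assuming the module is importable as `claude_mpm.dashboard.analysis_runner` (per the file list); the request ID and path are made-up values. `timestamp` is filled in by `__post_init__` when not supplied.

```python
from claude_mpm.dashboard.analysis_runner import AnalysisRequest

req = AnalysisRequest(
    request_id="req-001",         # hypothetical identifier
    path="/tmp/example-project",  # hypothetical directory
    languages=["python"],
)
print(req.timestamp.isoformat())           # set automatically by __post_init__
print(req.max_depth, req.ignore_patterns)  # None None
```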
+ class CodeAnalysisRunner:
+     """Manages code analysis subprocess execution for the dashboard.
+
+     WHY: Provides isolation between the dashboard server and analysis process,
+     allowing for cancellation, resource limits, and crash recovery.
+     """
+
+     def __init__(self, socketio_server):
+         """Initialize the analysis runner.
+
+         Args:
+             socketio_server: SocketIOServer instance for broadcasting events
+         """
+         self.logger = get_logger(__name__)
+         self.server = socketio_server
+         self.current_process = None
+         self.current_request = None
+         self.request_queue = Queue()
+         self.running = False
+         self.worker_thread = None
+         self.cancel_event = threading.Event()
+
+         # Statistics
+         self.stats = {
+             "analyses_started": 0,
+             "analyses_completed": 0,
+             "analyses_cancelled": 0,
+             "analyses_failed": 0,
+             "total_files": 0,
+             "total_nodes": 0,
+         }
+
+     def start(self):
+         """Start the analysis runner worker thread."""
+         if self.running:
+             return
+
+         self.running = True
+         self.cancel_event.clear()
+         self.worker_thread = threading.Thread(target=self._worker_loop, daemon=True)
+         self.worker_thread.start()
+         self.logger.info("Code analysis runner started")
+
+     def stop(self):
+         """Stop the analysis runner and cleanup."""
+         self.running = False
+         self.cancel_current()
+
+         # Add sentinel to queue to wake up worker
+         self.request_queue.put(None)
+
+         if self.worker_thread:
+             self.worker_thread.join(timeout=5)
+
+         self.logger.info("Code analysis runner stopped")
+
+     def request_analysis(
+         self,
+         request_id: str,
+         path: str,
+         languages: Optional[List[str]] = None,
+         max_depth: Optional[int] = None,
+         ignore_patterns: Optional[List[str]] = None,
+     ) -> bool:
+         """Queue a new analysis request.
+
+         Args:
+             request_id: Unique request identifier
+             path: Directory path to analyze
+             languages: Optional list of languages to filter
+             max_depth: Optional maximum directory depth
+             ignore_patterns: Optional list of patterns to ignore
+
+         Returns:
+             True if request was queued successfully
+         """
+         # Validate path
+         analysis_path = Path(path).resolve()
+         if not analysis_path.exists():
+             self._emit_error(request_id, f"Path does not exist: {path}")
+             return False
+
+         if not analysis_path.is_dir():
+             self._emit_error(request_id, f"Path is not a directory: {path}")
+             return False
+
+         # Create request
+         request = AnalysisRequest(
+             request_id=request_id,
+             path=str(analysis_path),
+             languages=languages,
+             max_depth=max_depth,
+             ignore_patterns=ignore_patterns,
+         )
+
+         # Queue request
+         self.request_queue.put(request)
+         self.logger.info(f"Queued analysis request {request_id} for {path}")
+
+         # Emit queued event
+         self._emit_event(
+             "code:analysis:queued",
+             {
+                 "request_id": request_id,
+                 "path": str(analysis_path),
+                 "queue_size": self.request_queue.qsize(),
+             },
+         )
+
+         return True
+
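
Putting these methods together: a caller constructs the runner with the dashboard's Socket.IO server, starts the worker thread once, and queues requests; paths that do not exist are rejected immediately via a `code:analysis:error` event. The sketch below uses a stub in place of the real `SocketIOServer`, exposing only the two members the runner touches (`port` and `broadcast_event`); the port and path values are assumptions.

```python
from claude_mpm.dashboard.analysis_runner import CodeAnalysisRunner


class StubServer:
    """Minimal stand-in exposing the two members the runner uses."""

    port = 8765  # assumed port

    def broadcast_event(self, event_type, data):
        print(event_type, data)


runner = CodeAnalysisRunner(StubServer())
runner.start()  # spawns the daemon worker thread
runner.request_analysis("req-001", "/tmp/example-project")  # made-up values
print(runner.get_status()["queue_size"])
runner.stop()   # cancels any running analysis and joins the worker
```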
+     def cancel_current(self):
+         """Cancel the currently running analysis."""
+         if self.current_process and self.current_process.poll() is None:
+             self.cancel_event.set()
+
+             # Try graceful termination first
+             self.current_process.terminate()
+             try:
+                 self.current_process.wait(timeout=2)
+             except subprocess.TimeoutExpired:
+                 # Force kill if needed
+                 self.current_process.kill()
+                 self.current_process.wait()
+
+             self.stats["analyses_cancelled"] += 1
+
+             if self.current_request:
+                 self._emit_event(
+                     "code:analysis:cancelled",
+                     {
+                         "request_id": self.current_request.request_id,
+                         "path": self.current_request.path,
+                     },
+                 )
+
+             self.logger.info("Cancelled current analysis")
+
+     def get_status(self) -> Dict[str, Any]:
+         """Get current runner status.
+
+         Returns:
+             Dictionary with current status and statistics
+         """
+         return {
+             "running": self.running,
+             "current_request": (
+                 asdict(self.current_request) if self.current_request else None
+             ),
+             "queue_size": self.request_queue.qsize(),
+             "stats": self.stats.copy(),
+         }
+
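
For reference, `get_status()` returns a plain dictionary shaped like the following; the keys come from the method above, while the values shown are illustrative (and `timestamp` is the raw `datetime` kept by `asdict`, rendered here as text):

```python
# Illustrative get_status() payload; keys come from the method above,
# values are made up.
status = {
    "running": True,
    "current_request": {
        "request_id": "req-001",
        "path": "/tmp/example-project",
        "languages": ["python"],
        "max_depth": None,
        "ignore_patterns": None,
        "timestamp": "2025-08-26T01:42:58",  # datetime object in practice
    },
    "queue_size": 0,
    "stats": {
        "analyses_started": 1,
        "analyses_completed": 0,
        "analyses_cancelled": 0,
        "analyses_failed": 0,
        "total_files": 0,
        "total_nodes": 0,
    },
}
```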
+     def _worker_loop(self):
+         """Worker thread loop for processing analysis requests."""
+         while self.running:
+             try:
+                 # Get next request (blocking with timeout)
+                 request = self.request_queue.get(timeout=1)
+
+                 if request is None:  # Sentinel value
+                     break
+
+                 # Reset cancel event
+                 self.cancel_event.clear()
+
+                 # Process request
+                 self._process_request(request)
+
+             except Exception as e:
+                 self.logger.error(f"Error in worker loop: {e}")
+
+     def _process_request(self, request: AnalysisRequest):
+         """Process a single analysis request.
+
+         Args:
+             request: The analysis request to process
+         """
+         self.current_request = request
+         self.stats["analyses_started"] += 1
+
+         try:
+             # Emit start event
+             self._emit_event(
+                 "code:analysis:start",
+                 {
+                     "request_id": request.request_id,
+                     "path": request.path,
+                     "languages": request.languages,
+                     "timestamp": request.timestamp.isoformat(),
+                 },
+             )
+
+             # Build command
+             cmd = self._build_command(request)
+
+             # Start subprocess
+             self.current_process = subprocess.Popen(
+                 cmd,
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.PIPE,
+                 text=True,
+                 bufsize=1,
+                 universal_newlines=True,
+                 env=self._get_subprocess_env(),
+             )
+
+             # Process output
+             self._process_output(request)
+
+             # Wait for completion
+             return_code = self.current_process.wait()
+
+             if self.cancel_event.is_set():
+                 # Analysis was cancelled
+                 pass  # Event already emitted in cancel_current
+             elif return_code == 0:
+                 # Success
+                 self.stats["analyses_completed"] += 1
+                 self._emit_event(
+                     "code:analysis:complete",
+                     {
+                         "request_id": request.request_id,
+                         "path": request.path,
+                         "stats": {
+                             "total_files": self.stats["total_files"],
+                             "total_nodes": self.stats["total_nodes"],
+                         },
+                     },
+                 )
+             else:
+                 # Failure
+                 self.stats["analyses_failed"] += 1
+                 stderr = (
+                     self.current_process.stderr.read()
+                     if self.current_process.stderr
+                     else ""
+                 )
+                 self._emit_error(
+                     request.request_id,
+                     f"Analysis failed with code {return_code}: {stderr}",
+                 )
+
+         except Exception as e:
+             self.logger.error(f"Error processing request {request.request_id}: {e}")
+             self.stats["analyses_failed"] += 1
+             self._emit_error(request.request_id, str(e))
+
+         finally:
+             self.current_process = None
+             self.current_request = None
+
+     def _build_command(self, request: AnalysisRequest) -> List[str]:
+         """Build the subprocess command for analysis.
+
+         Args:
+             request: The analysis request
+
+         Returns:
+             Command list for subprocess.Popen
+         """
+         # Get Python executable
+         python_exe = sys.executable
+
+         # Build command - use the tools module as a runnable module
+         cmd = [
+             python_exe,
+             "-m",
+             "claude_mpm.tools",
+             "--path",
+             request.path,
+             "--emit-events",
+             "--output-format",
+             "json-stream",
+         ]
+
+         # Add optional parameters
+         if request.languages:
+             cmd.extend(["--languages", ",".join(request.languages)])
+
+         if request.max_depth:
+             cmd.extend(["--max-depth", str(request.max_depth)])
+
+         if request.ignore_patterns:
+             for pattern in request.ignore_patterns:
+                 cmd.extend(["--ignore", pattern])
+
+         return cmd
+
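
For a request with `languages=["python"]`, `max_depth=3`, and `ignore_patterns=["node_modules"]`, `_build_command` yields an argument list along these lines (the interpreter path varies by machine, and the target path is a made-up example):

```python
# Illustrative result of _build_command:
[
    "/usr/bin/python3",  # sys.executable, varies by environment
    "-m", "claude_mpm.tools",
    "--path", "/tmp/example-project",
    "--emit-events",
    "--output-format", "json-stream",
    "--languages", "python",
    "--max-depth", "3",
    "--ignore", "node_modules",
]
```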
+     def _get_subprocess_env(self) -> Dict[str, str]:
+         """Get environment variables for subprocess.
+
+         Returns:
+             Environment dictionary for subprocess
+         """
+         env = os.environ.copy()
+
+         # Ensure Socket.IO URL is set for event emission
+         env["SOCKETIO_URL"] = f"http://localhost:{self.server.port}"
+
+         # Set Python path to include our modules
+         python_path = env.get("PYTHONPATH", "")
+         src_path = str(Path(__file__).parent.parent.parent)
+         if src_path not in python_path:
+             env["PYTHONPATH"] = f"{src_path}:{python_path}" if python_path else src_path
+
+         return env
+
+     def _process_output(self, request: AnalysisRequest):
+         """Process subprocess output and emit events.
+
+         Args:
+             request: The current analysis request
+         """
+         if not self.current_process:
+             return
+
+         # Read output line by line
+         for line in iter(self.current_process.stdout.readline, ""):
+             if self.cancel_event.is_set():
+                 break
+
+             line = line.strip()
+             if not line:
+                 continue
+
+             try:
+                 # Parse JSON event
+                 event = json.loads(line)
+
+                 # Route event to appropriate handler
+                 event_type = event.get("type")
+                 event_data = event.get("data", {})
+
+                 # Add request ID to event data
+                 event_data["request_id"] = request.request_id
+
+                 # Update statistics based on event type
+                 if event_type == "code:file:complete":
+                     self.stats["total_files"] += 1
+                 elif event_type == "code:node:found":
+                     self.stats["total_nodes"] += 1
+
+                 # Emit to Socket.IO clients
+                 self._emit_event(event_type, event_data)
+
+             except json.JSONDecodeError:
+                 # Not JSON, treat as log message
+                 self.logger.debug(f"Analyzer output: {line}")
+             except Exception as e:
+                 self.logger.warning(f"Error processing analyzer output: {e}")
+
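
Each stdout line from the analyzer is expected to be a self-contained JSON object carrying `type` and `data`; anything that fails to parse is logged and skipped. A small sketch of that contract, where the fields inside `data` are assumptions and only `type`/`data` are relied on by the code above:

```python
import json

# Lines as the analyzer subprocess might emit them.
stream = [
    '{"type": "code:file:complete", "data": {"path": "src/app.py"}}',
    '{"type": "code:node:found", "data": {"name": "main", "kind": "function"}}',
    "plain log text that is not JSON",
]

for line in stream:
    try:
        event = json.loads(line)
        print(event["type"], event.get("data", {}))
    except json.JSONDecodeError:
        print("log:", line)  # mirrors the debug-log fallback above
```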
+     def _emit_event(self, event_type: str, data: Dict[str, Any]):
+         """Emit an event to Socket.IO clients.
+
+         Args:
+             event_type: Type of event
+             data: Event data
+         """
+         if self.server:
+             # Add timestamp if not present
+             if "timestamp" not in data:
+                 data["timestamp"] = datetime.utcnow().isoformat()
+
+             # Broadcast to all clients
+             self.server.broadcast_event(event_type, data)
+
+     def _emit_error(self, request_id: str, message: str):
+         """Emit an error event.
+
+         Args:
+             request_id: Request that caused the error
+             message: Error message
+         """
+         self._emit_event(
+             "code:analysis:error",
+             {
+                 "request_id": request_id,
+                 "message": message,
+                 "timestamp": datetime.utcnow().isoformat(),
+             },
+         )
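
On the receiving side, a dashboard client can subscribe to these event names over Socket.IO. A hedged sketch using the standard `python-socketio` client, assuming `broadcast_event` relays the event name unchanged and that the server listens on the port shown (both assumptions here):

```python
import socketio  # pip install "python-socketio[client]"

sio = socketio.Client()


@sio.on("code:analysis:complete")
def on_complete(data):
    print("analysis finished:", data.get("stats"))


@sio.on("code:analysis:error")
def on_error(data):
    print("analysis failed:", data.get("message"))


sio.connect("http://localhost:8765")  # port assumed, not taken from the diff
sio.wait()
```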