claude_mpm-4.2.9-py3-none-any.whl → claude_mpm-4.2.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- claude_mpm/VERSION +1 -1
- claude_mpm/cli/commands/dashboard.py +59 -126
- claude_mpm/cli/commands/monitor.py +82 -212
- claude_mpm/cli/commands/run.py +33 -33
- claude_mpm/cli/parsers/monitor_parser.py +12 -2
- claude_mpm/dashboard/static/css/code-tree.css +8 -16
- claude_mpm/dashboard/static/dist/components/code-tree.js +1 -1
- claude_mpm/dashboard/static/dist/components/file-viewer.js +2 -0
- claude_mpm/dashboard/static/dist/components/module-viewer.js +1 -1
- claude_mpm/dashboard/static/dist/components/unified-data-viewer.js +1 -1
- claude_mpm/dashboard/static/dist/dashboard.js +1 -1
- claude_mpm/dashboard/static/dist/socket-client.js +1 -1
- claude_mpm/dashboard/static/js/components/code-tree.js +692 -114
- claude_mpm/dashboard/static/js/components/file-viewer.js +538 -0
- claude_mpm/dashboard/static/js/components/module-viewer.js +26 -0
- claude_mpm/dashboard/static/js/components/unified-data-viewer.js +166 -14
- claude_mpm/dashboard/static/js/dashboard.js +108 -91
- claude_mpm/dashboard/static/js/socket-client.js +9 -7
- claude_mpm/dashboard/templates/index.html +2 -7
- claude_mpm/hooks/claude_hooks/hook_handler.py +1 -11
- claude_mpm/hooks/claude_hooks/services/connection_manager.py +54 -59
- claude_mpm/hooks/claude_hooks/services/connection_manager_http.py +112 -72
- claude_mpm/services/agents/deployment/agent_template_builder.py +0 -1
- claude_mpm/services/cli/unified_dashboard_manager.py +354 -0
- claude_mpm/services/monitor/__init__.py +20 -0
- claude_mpm/services/monitor/daemon.py +378 -0
- claude_mpm/services/monitor/event_emitter.py +342 -0
- claude_mpm/services/monitor/handlers/__init__.py +20 -0
- claude_mpm/services/monitor/handlers/code_analysis.py +334 -0
- claude_mpm/services/monitor/handlers/dashboard.py +298 -0
- claude_mpm/services/monitor/handlers/hooks.py +491 -0
- claude_mpm/services/monitor/management/__init__.py +18 -0
- claude_mpm/services/monitor/management/health.py +124 -0
- claude_mpm/services/monitor/management/lifecycle.py +338 -0
- claude_mpm/services/monitor/server.py +596 -0
- claude_mpm/tools/code_tree_analyzer.py +33 -17
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/METADATA +1 -1
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/RECORD +42 -37
- claude_mpm/cli/commands/socketio_monitor.py +0 -233
- claude_mpm/scripts/socketio_daemon.py +0 -571
- claude_mpm/scripts/socketio_daemon_hardened.py +0 -937
- claude_mpm/scripts/socketio_daemon_wrapper.py +0 -78
- claude_mpm/scripts/socketio_server_manager.py +0 -349
- claude_mpm/services/cli/dashboard_launcher.py +0 -423
- claude_mpm/services/cli/socketio_manager.py +0 -595
- claude_mpm/services/dashboard/stable_server.py +0 -1020
- claude_mpm/services/socketio/monitor_server.py +0 -505
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/WHEEL +0 -0
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.2.9.dist-info → claude_mpm-4.2.12.dist-info}/top_level.txt +0 -0
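
The expanded hunk below is the largest single addition: the new unified monitor daemon at claude_mpm/services/monitor/daemon.py (+378), which replaces the removed socketio_daemon/stable_server stack. As a reading aid, here is a minimal usage sketch assembled only from the constructor signature and the public start/stop/restart/status methods visible in that hunk; the import path simply mirrors the file location, and the rewritten monitor/dashboard CLI commands may well wrap this differently:

from claude_mpm.services.monitor.daemon import UnifiedMonitorDaemon

# Foreground: writes a PID file so other processes can detect the
# instance, then blocks in a one-second sleep loop until Ctrl+C/SIGTERM.
UnifiedMonitorDaemon(host="localhost", port=8765).start()

# Background: refuses to start while the PID file points at a live
# daemon, waits for pre-warm threads, then daemonizes and serves the
# dashboard plus Socket.IO events on the single port 8765.
UnifiedMonitorDaemon(port=8765, daemon_mode=True).start()

# Any later process can inspect the daemon through the shared PID file;
# status() also merges in the server's own stats when available.
print(UnifiedMonitorDaemon(port=8765).status())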
claude_mpm/services/monitor/daemon.py
@@ -0,0 +1,378 @@
+"""
+Unified Monitor Daemon for Claude MPM
+=====================================
+
+WHY: This is the main daemon process that provides a single, stable way to
+launch all monitoring functionality. It combines HTTP dashboard serving,
+Socket.IO event handling, real AST analysis, and Claude Code hook ingestion.
+
+DESIGN DECISIONS:
+- Single process replaces multiple competing server implementations
+- Daemon-ready with proper lifecycle management
+- Real AST analysis using CodeTreeAnalyzer
+- Single port (8765) for all functionality
+- Built on proven aiohttp + socketio foundation
+"""
+
+import os
+import signal
+import sys
+import threading
+import time
+from pathlib import Path
+from typing import Optional
+
+from ...core.logging_config import get_logger
+from .management.health import HealthMonitor
+from .management.lifecycle import DaemonLifecycle
+from .server import UnifiedMonitorServer
+
+
+class UnifiedMonitorDaemon:
+    """Unified daemon process for all Claude MPM monitoring functionality.
+
+    WHY: Provides a single, stable entry point for launching monitoring services.
+    Replaces the multiple competing server implementations with one cohesive daemon.
+    """
+
+    def __init__(
+        self,
+        host: str = "localhost",
+        port: int = 8765,
+        daemon_mode: bool = False,
+        pid_file: Optional[str] = None,
+        log_file: Optional[str] = None,
+    ):
+        """Initialize the unified monitor daemon.
+
+        Args:
+            host: Host to bind to
+            port: Port to bind to
+            daemon_mode: Whether to run as background daemon
+            pid_file: Path to PID file for daemon mode
+            log_file: Path to log file for daemon mode
+        """
+        self.host = host
+        self.port = port
+        self.daemon_mode = daemon_mode
+        self.logger = get_logger(__name__)
+
+        # Daemon management
+        self.lifecycle = DaemonLifecycle(
+            pid_file=pid_file or self._get_default_pid_file(), log_file=log_file
+        )
+
+        # Core server
+        self.server = UnifiedMonitorServer(host=host, port=port)
+
+        # Health monitoring
+        self.health_monitor = HealthMonitor(port=port)
+
+        # State
+        self.running = False
+        self.shutdown_event = threading.Event()
+
+    def _get_default_pid_file(self) -> str:
+        """Get default PID file path."""
+        project_root = Path.cwd()
+        claude_mpm_dir = project_root / ".claude-mpm"
+        claude_mpm_dir.mkdir(exist_ok=True)
+        return str(claude_mpm_dir / "monitor-daemon.pid")
+
+    def start(self) -> bool:
+        """Start the unified monitor daemon.
+
+        Returns:
+            True if started successfully, False otherwise
+        """
+        try:
+            if self.daemon_mode:
+                return self._start_daemon()
+            return self._start_foreground()
+        except Exception as e:
+            self.logger.error(f"Failed to start unified monitor daemon: {e}")
+            return False
+
+    def _start_daemon(self) -> bool:
+        """Start as background daemon process."""
+        self.logger.info("Starting unified monitor daemon in background mode")
+
+        # Check if already running
+        if self.lifecycle.is_running():
+            existing_pid = self.lifecycle.get_pid()
+            self.logger.warning(f"Daemon already running with PID {existing_pid}")
+            return False
+
+        # Wait for any pre-warming threads to complete before forking
+        self._wait_for_prewarm_completion()
+
+        # Daemonize the process
+        success = self.lifecycle.daemonize()
+        if not success:
+            return False
+
+        # Start the server in daemon mode
+        return self._run_server()
+
+    def _start_foreground(self) -> bool:
+        """Start in foreground mode."""
+        self.logger.info(f"Starting unified monitor daemon on {self.host}:{self.port}")
+
+        # Check if already running (check PID file even in foreground mode)
+        if self.lifecycle.is_running():
+            existing_pid = self.lifecycle.get_pid()
+            self.logger.warning(
+                f"Monitor daemon already running with PID {existing_pid}"
+            )
+            return False
+
+        # Setup signal handlers for graceful shutdown
+        self._setup_signal_handlers()
+
+        # Write PID file for foreground mode too (so other processes can detect it)
+        self.lifecycle.write_pid_file()
+
+        # Start the server
+        return self._run_server()
+
+    def _run_server(self) -> bool:
+        """Run the main server loop."""
+        try:
+            # Start health monitoring
+            self.health_monitor.start()
+
+            # Start the unified server
+            success = self.server.start()
+            if not success:
+                self.logger.error("Failed to start unified monitor server")
+                return False
+
+            self.running = True
+            self.logger.info("Unified monitor daemon started successfully")
+
+            # Keep running until shutdown
+            if self.daemon_mode:
+                # In daemon mode, run until shutdown signal
+                while self.running and not self.shutdown_event.is_set():
+                    time.sleep(1)
+            else:
+                # In foreground mode, run until interrupted
+                try:
+                    while self.running:
+                        time.sleep(1)
+                except KeyboardInterrupt:
+                    self.logger.info("Received keyboard interrupt, shutting down...")
+
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error running unified monitor daemon: {e}")
+            return False
+        finally:
+            self._cleanup()
+
+    def stop(self) -> bool:
+        """Stop the unified monitor daemon.
+
+        Returns:
+            True if stopped successfully, False otherwise
+        """
+        try:
+            self.logger.info("Stopping unified monitor daemon")
+
+            # Signal shutdown
+            self.running = False
+            self.shutdown_event.set()
+
+            # Stop server with proper cleanup
+            if self.server:
+                self.logger.debug("Initiating server shutdown...")
+                self.server.stop()
+                # Give asyncio loops adequate time to cleanup properly
+                # This is critical to prevent kqueue errors
+                time.sleep(2.0)
+                self.server = None
+
+            # Stop health monitoring
+            if self.health_monitor:
+                self.logger.debug("Stopping health monitor...")
+                self.health_monitor.stop()
+                self.health_monitor = None
+
+            # Clean up any asyncio resources
+            self._cleanup_asyncio_resources()
+
+            # Give a final moment for OS-level cleanup
+            time.sleep(0.5)
+
+            # Cleanup daemon files (always cleanup PID file)
+            self.lifecycle.cleanup()
+
+            self.logger.info("Unified monitor daemon stopped")
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Error stopping unified monitor daemon: {e}")
+            return False
+
+    def restart(self) -> bool:
+        """Restart the unified monitor daemon.
+
+        Returns:
+            True if restarted successfully, False otherwise
+        """
+        self.logger.info("Restarting unified monitor daemon")
+
+        # Stop first
+        if not self.stop():
+            return False
+
+        # Wait longer for port to be released properly
+        # This is needed because the daemon process may take time to fully cleanup
+        self.logger.info("Waiting for port to be fully released...")
+        time.sleep(3)
+
+        # Start again
+        return self.start()
+
+    def status(self) -> dict:
+        """Get daemon status information.
+
+        Returns:
+            Dictionary with status information
+        """
+        # Always check the PID file to see if a daemon is running
+        # This ensures we detect daemons started by other processes
+        is_running = self.lifecycle.is_running()
+        pid = self.lifecycle.get_pid()
+
+        # If no PID file exists but we're running in the current process
+        if not is_running and self.running:
+            is_running = True
+            pid = os.getpid()
+
+        status = {
+            "running": is_running,
+            "pid": pid if is_running else None,
+            "host": self.host,
+            "port": self.port,
+            "daemon_mode": self.daemon_mode,
+            "health": (
+                self.health_monitor.get_status() if self.health_monitor else "unknown"
+            ),
+        }
+
+        if self.server:
+            status.update(self.server.get_status())
+
+        return status
+
+    def _setup_signal_handlers(self):
+        """Setup signal handlers for graceful shutdown."""
+
+        def signal_handler(signum, frame):
+            self.logger.info(f"Received signal {signum}, shutting down...")
+            self.stop()
+            sys.exit(0)
+
+        signal.signal(signal.SIGINT, signal_handler)
+        signal.signal(signal.SIGTERM, signal_handler)
+
+    def _cleanup(self):
+        """Cleanup resources."""
+        try:
+            # Stop server first with proper cleanup
+            if self.server:
+                self.logger.debug("Stopping server and cleaning up event loops...")
+                self.server.stop()
+                # Give the server more time to cleanup event loops properly
+                # This is critical to prevent kqueue errors
+                time.sleep(1.5)
+                self.server = None
+
+            # Then stop health monitor
+            if self.health_monitor:
+                self.logger.debug("Stopping health monitor...")
+                self.health_monitor.stop()
+                self.health_monitor = None
+
+            # Ensure PID file is removed
+            if not self.daemon_mode:
+                # In foreground mode, make sure we cleanup the PID file
+                self.lifecycle.cleanup()
+
+            # Clean up any remaining asyncio resources in the main thread
+            self._cleanup_asyncio_resources()
+
+            # Clear any remaining references
+            self.shutdown_event.clear()
+
+            self.logger.debug("Cleanup completed successfully")
+
+        except Exception as e:
+            self.logger.error(f"Error during cleanup: {e}")
+
+    def _cleanup_asyncio_resources(self):
+        """Clean up any asyncio resources in the current thread."""
+        try:
+            import asyncio
+
+            # Try to get the current event loop
+            try:
+                loop = asyncio.get_event_loop()
+                if loop and not loop.is_closed():
+                    # Cancel any pending tasks
+                    pending = asyncio.all_tasks(loop)
+                    for task in pending:
+                        task.cancel()
+
+                    # Stop and close the loop
+                    if loop.is_running():
+                        loop.stop()
+
+                    # Clear the event loop from the thread
+                    asyncio.set_event_loop(None)
+
+                    # Close the loop
+                    loop.close()
+
+            except RuntimeError:
+                # No event loop in current thread, that's fine
+                pass
+
+        except Exception as e:
+            self.logger.debug(f"Error cleaning up asyncio resources: {e}")
+
+    def _wait_for_prewarm_completion(self, timeout: float = 5.0):
+        """Wait for MCP pre-warming threads to complete before forking.
+
+        This prevents inherited threads and event loops in the forked process.
+        """
+        try:
+            import threading
+            import time
+
+            start_time = time.time()
+
+            # Get all non-daemon threads (pre-warm threads are daemon threads)
+            # but we still want to give them a moment to complete
+            active_threads = [
+                t
+                for t in threading.enumerate()
+                if t.is_alive() and t != threading.current_thread()
+            ]
+
+            if active_threads:
+                self.logger.debug(
+                    f"Waiting for {len(active_threads)} threads to complete"
+                )
+
+                # Wait briefly for threads to complete
+                wait_time = min(timeout, 2.0)  # Max 2 seconds for daemon threads
+                time.sleep(wait_time)
+
+            elapsed = time.time() - start_time
+            self.logger.debug(f"Waited {elapsed:.2f}s for thread completion")
+
+        except Exception as e:
+            self.logger.debug(f"Error waiting for threads: {e}")