claude-mpm 4.2.11__py3-none-any.whl → 4.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
claude_mpm/VERSION CHANGED
@@ -1 +1 @@
- 4.2.11
+ 4.2.13
claude_mpm/cli/commands/monitor.py CHANGED
@@ -70,10 +70,20 @@ class MonitorCommand(BaseCommand):
          if port is None:
              port = 8765  # Default to 8765 for unified monitor
          host = getattr(args, "host", "localhost")
-         daemon_mode = getattr(args, "daemon", False)  # Default to foreground

+         # Check for explicit foreground flag first, then background flag
+         # Default to daemon/background mode if neither specified
+         if getattr(args, "foreground", False):
+             daemon_mode = False
+         elif getattr(args, "background", None) is not None:
+             daemon_mode = getattr(args, "background", False)
+         else:
+             # Default to daemon/background mode
+             daemon_mode = True
+
+         mode_str = "background/daemon" if daemon_mode else "foreground"
          self.logger.info(
-             f"Starting unified monitor daemon on {host}:{port} (daemon: {daemon_mode})"
+             f"Starting unified monitor daemon on {host}:{port} (mode: {mode_str})"
          )

          # Create unified monitor daemon
@@ -95,9 +105,10 @@ class MonitorCommand(BaseCommand):

          # Start the daemon
          if self.daemon.start():
+             mode_info = " in background" if daemon_mode else " in foreground"
              return CommandResult.success_result(
-                 f"Unified monitor daemon started on {host}:{port}",
-                 data={"url": f"http://{host}:{port}", "port": port},
+                 f"Unified monitor daemon started on {host}:{port}{mode_info}",
+                 data={"url": f"http://{host}:{port}", "port": port, "mode": mode_str},
              )
          return CommandResult.error_result("Failed to start unified monitor daemon")

@@ -111,8 +122,8 @@ class MonitorCommand(BaseCommand):
          if not daemon.lifecycle.is_running():
              return CommandResult.success_result("No unified monitor daemon running")

-         # Stop the daemon
-         if daemon.stop():
+         # Stop the daemon by PID (works for both daemon and foreground mode)
+         if daemon.lifecycle.stop_daemon():
              return CommandResult.success_result("Unified monitor daemon stopped")
          return CommandResult.error_result("Failed to stop unified monitor daemon")

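
Taken together, these hunks invert the old default: the monitor now starts in background/daemon mode unless `--foreground` is passed, and an explicit `--background` still wins over the fallback. As a minimal standalone sketch of that precedence (the `Namespace` objects stand in for parsed CLI args; this is illustrative, not the packaged code):

```python
from argparse import Namespace

def resolve_daemon_mode(args: Namespace) -> bool:
    """Mirror of the new precedence: --foreground beats --background,
    and the default (neither flag given) is background/daemon mode."""
    if getattr(args, "foreground", False):
        return False
    if getattr(args, "background", None) is not None:
        return bool(args.background)
    return True  # default: background/daemon

assert resolve_daemon_mode(Namespace()) is True                  # default
assert resolve_daemon_mode(Namespace(foreground=True)) is False  # explicit foreground
assert resolve_daemon_mode(Namespace(background=True)) is True   # explicit background
```
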
claude_mpm/cli/parsers/monitor_parser.py CHANGED
@@ -58,8 +58,18 @@ def add_monitor_subparser(subparsers) -> argparse.ArgumentParser:
          default=8766,
          help="Dashboard port (default: 8766)",
      )
-     start_monitor_parser.add_argument(
-         "--background", action="store_true", help="Run server in background"
+     # Add mutually exclusive group for foreground/background modes
+     mode_group = start_monitor_parser.add_mutually_exclusive_group()
+     mode_group.add_argument(
+         "--foreground",
+         action="store_true",
+         help="Run server in foreground mode (blocks terminal)",
+     )
+     mode_group.add_argument(
+         "--background",
+         action="store_true",
+         default=None,  # None allows us to detect when not specified
+         help="Run server in background/daemon mode (default behavior)",
      )
      start_monitor_parser.add_argument(
          "--force",
claude_mpm/commands/mpm-agents.md CHANGED
@@ -2,11 +2,47 @@

  Show all available Claude MPM agents with their versions and deployment status.

- This command displays:
- - Agent names and descriptions
- - Version information
- - Tool availability
- - Model preferences
- - Deployment status
-
- Usage: /mpm-agents
+ ## Usage
+
+ ```
+ /mpm-agents
+ ```
+
+ ## Description
+
+ This command lists all available Claude MPM agents, including both built-in agents and any custom agents you've created. It shows their current deployment status, version information, and capabilities.
+
+ ## What This Command Does
+
+ When you run `/mpm-agents`, I will:
+
+ 1. **List Available Agents**: Run `claude-mpm agents list` to show all agents
+ 2. **Display Agent Information**:
+    - Agent names and IDs
+    - Brief descriptions
+    - Model preferences (opus, sonnet, haiku)
+    - Tool availability
+    - Version information
+    - Deployment status
+
+ ## Output Example
+
+ The command displays agents in a formatted table showing:
+ - Agent name and description
+ - Version and model preference
+ - Tools available to the agent
+ - Current deployment status
+
+ ## Implementation
+
+ To show available agents, I'll execute:
+ ```bash
+ claude-mpm agents list --deployed
+ ```
+
+ This will display all deployed agents that are currently available for use.
+
+ Alternatively, you can use these variations:
+ - `claude-mpm agents list --system` - Show system agents
+ - `claude-mpm agents list --by-tier` - Group agents by precedence tier
+ - `claude-mpm agents list --all` - Show all agents including undeployed
claude_mpm/services/monitor/daemon.py CHANGED
@@ -103,6 +103,9 @@ class UnifiedMonitorDaemon:
              self.logger.warning(f"Daemon already running with PID {existing_pid}")
              return False

+         # Wait for any pre-warming threads to complete before forking
+         self._wait_for_prewarm_completion()
+
          # Daemonize the process
          success = self.lifecycle.daemonize()
          if not success:
@@ -115,9 +118,20 @@ class UnifiedMonitorDaemon:
          """Start in foreground mode."""
          self.logger.info(f"Starting unified monitor daemon on {self.host}:{self.port}")

+         # Check if already running (check PID file even in foreground mode)
+         if self.lifecycle.is_running():
+             existing_pid = self.lifecycle.get_pid()
+             self.logger.warning(
+                 f"Monitor daemon already running with PID {existing_pid}"
+             )
+             return False
+
          # Setup signal handlers for graceful shutdown
          self._setup_signal_handlers()

+         # Write PID file for foreground mode too (so other processes can detect it)
+         self.lifecycle.write_pid_file()
+
          # Start the server
          return self._run_server()

@@ -170,17 +184,29 @@ class UnifiedMonitorDaemon:
          self.running = False
          self.shutdown_event.set()

-         # Stop server
+         # Stop server with proper cleanup
          if self.server:
+             self.logger.debug("Initiating server shutdown...")
              self.server.stop()
+             # Give asyncio loops adequate time to cleanup properly
+             # This is critical to prevent kqueue errors
+             time.sleep(2.0)
+             self.server = None

          # Stop health monitoring
          if self.health_monitor:
+             self.logger.debug("Stopping health monitor...")
              self.health_monitor.stop()
+             self.health_monitor = None

-         # Cleanup daemon files
-         if self.daemon_mode:
-             self.lifecycle.cleanup()
+         # Clean up any asyncio resources
+         self._cleanup_asyncio_resources()
+
+         # Give a final moment for OS-level cleanup
+         time.sleep(0.5)
+
+         # Cleanup daemon files (always cleanup PID file)
+         self.lifecycle.cleanup()

          self.logger.info("Unified monitor daemon stopped")
          return True
@@ -201,8 +227,10 @@ class UnifiedMonitorDaemon:
          if not self.stop():
              return False

-         # Wait a moment
-         time.sleep(2)
+         # Wait longer for port to be released properly
+         # This is needed because the daemon process may take time to fully cleanup
+         self.logger.info("Waiting for port to be fully released...")
+         time.sleep(3)

          # Start again
          return self.start()
@@ -213,12 +241,19 @@ class UnifiedMonitorDaemon:
          Returns:
              Dictionary with status information
          """
-         is_running = self.lifecycle.is_running() if self.daemon_mode else self.running
-         pid = self.lifecycle.get_pid() if self.daemon_mode else os.getpid()
+         # Always check the PID file to see if a daemon is running
+         # This ensures we detect daemons started by other processes
+         is_running = self.lifecycle.is_running()
+         pid = self.lifecycle.get_pid()
+
+         # If no PID file exists but we're running in the current process
+         if not is_running and self.running:
+             is_running = True
+             pid = os.getpid()

          status = {
              "running": is_running,
-             "pid": pid,
+             "pid": pid if is_running else None,
              "host": self.host,
              "port": self.port,
              "daemon_mode": self.daemon_mode,
@@ -246,11 +281,98 @@ class UnifiedMonitorDaemon:
      def _cleanup(self):
          """Cleanup resources."""
          try:
+             # Stop server first with proper cleanup
              if self.server:
+                 self.logger.debug("Stopping server and cleaning up event loops...")
                  self.server.stop()
+                 # Give the server more time to cleanup event loops properly
+                 # This is critical to prevent kqueue errors
+                 time.sleep(1.5)
+                 self.server = None

+             # Then stop health monitor
              if self.health_monitor:
+                 self.logger.debug("Stopping health monitor...")
                  self.health_monitor.stop()
+                 self.health_monitor = None
+
+             # Ensure PID file is removed
+             if not self.daemon_mode:
+                 # In foreground mode, make sure we cleanup the PID file
+                 self.lifecycle.cleanup()
+
+             # Clean up any remaining asyncio resources in the main thread
+             self._cleanup_asyncio_resources()
+
+             # Clear any remaining references
+             self.shutdown_event.clear()
+
+             self.logger.debug("Cleanup completed successfully")

          except Exception as e:
              self.logger.error(f"Error during cleanup: {e}")
+
+     def _cleanup_asyncio_resources(self):
+         """Clean up any asyncio resources in the current thread."""
+         try:
+             import asyncio
+
+             # Try to get the current event loop
+             try:
+                 loop = asyncio.get_event_loop()
+                 if loop and not loop.is_closed():
+                     # Cancel any pending tasks
+                     pending = asyncio.all_tasks(loop)
+                     for task in pending:
+                         task.cancel()
+
+                     # Stop and close the loop
+                     if loop.is_running():
+                         loop.stop()
+
+                     # Clear the event loop from the thread
+                     asyncio.set_event_loop(None)
+
+                     # Close the loop
+                     loop.close()
+
+             except RuntimeError:
+                 # No event loop in current thread, that's fine
+                 pass
+
+         except Exception as e:
+             self.logger.debug(f"Error cleaning up asyncio resources: {e}")
+
+     def _wait_for_prewarm_completion(self, timeout: float = 5.0):
+         """Wait for MCP pre-warming threads to complete before forking.
+
+         This prevents inherited threads and event loops in the forked process.
+         """
+         try:
+             import threading
+             import time
+
+             start_time = time.time()
+
+             # Get all non-daemon threads (pre-warm threads are daemon threads)
+             # but we still want to give them a moment to complete
+             active_threads = [
+                 t
+                 for t in threading.enumerate()
+                 if t.is_alive() and t != threading.current_thread()
+             ]
+
+             if active_threads:
+                 self.logger.debug(
+                     f"Waiting for {len(active_threads)} threads to complete"
+                 )
+
+                 # Wait briefly for threads to complete
+                 wait_time = min(timeout, 2.0)  # Max 2 seconds for daemon threads
+                 time.sleep(wait_time)
+
+                 elapsed = time.time() - start_time
+                 self.logger.debug(f"Waited {elapsed:.2f}s for thread completion")
+
+         except Exception as e:
+             self.logger.debug(f"Error waiting for threads: {e}")
claude_mpm/services/monitor/event_emitter.py CHANGED
@@ -251,20 +251,75 @@ class AsyncEventEmitter:
          }

      async def close(self):
-         """Clean up resources."""
+         """Clean up resources with proper order and timing."""
          try:
-             if self._http_session:
-                 await self._http_session.close()
+             # Cancel batch processor if running
+             if self._batch_processor_task and not self._batch_processor_task.done():
+                 self._batch_processor_task.cancel()
+                 try:
+                     await self._batch_processor_task
+                 except asyncio.CancelledError:
+                     pass
+                 except Exception as e:
+                     self.logger.debug(f"Error cancelling batch processor: {e}")
+                 finally:
+                     self._batch_processor_task = None
+
+             # Clear Socket.IO server references first
+             # This prevents any new events from being emitted
+             self._socketio_servers.clear()

+             # Close HTTP session (must be done before connector)
+             if self._http_session:
+                 try:
+                     # Cancel any pending requests
+                     if (
+                         hasattr(self._http_session, "_connector")
+                         and self._http_session._connector
+                     ):
+                         # Give ongoing requests a moment to complete
+                         await asyncio.sleep(0.1)
+
+                     # Close the session
+                     await self._http_session.close()
+
+                     # CRITICAL: Wait for session to fully close
+                     # This prevents the "I/O operation on closed kqueue" error
+                     await asyncio.sleep(0.25)
+
+                 except Exception as e:
+                     self.logger.debug(f"Error closing HTTP session: {e}")
+                 finally:
+                     self._http_session = None
+
+             # Then close the connector (after session is fully closed)
              if self._http_connector:
-                 await self._http_connector.close()
+                 try:
+                     # Close the connector
+                     await self._http_connector.close()

-             self._socketio_servers.clear()
+                     # Give the connector adequate time to close all connections
+                     # This is critical for preventing kqueue errors
+                     await asyncio.sleep(0.5)
+
+                 except Exception as e:
+                     self.logger.debug(f"Error closing HTTP connector: {e}")
+                 finally:
+                     self._http_connector = None

-             self.logger.info("AsyncEventEmitter closed")
+             # Reset singleton instance
+             AsyncEventEmitter._instance = None
+
+             self.logger.info("AsyncEventEmitter closed successfully")

          except Exception as e:
              self.logger.error(f"Error closing AsyncEventEmitter: {e}")
+
+         finally:
+             # Ensure references are cleared even if errors occur
+             self._http_session = None
+             self._http_connector = None
+             self._socketio_servers.clear()
+             AsyncEventEmitter._instance = None

  # Global instance for easy access
@@ -277,3 +332,11 @@ async def get_event_emitter() -> AsyncEventEmitter:
      if _global_emitter is None:
          _global_emitter = await AsyncEventEmitter.get_instance()
      return _global_emitter
+
+
+ async def cleanup_event_emitter():
+     """Clean up the global event emitter instance."""
+     global _global_emitter
+     if _global_emitter is not None:
+         await _global_emitter.close()
+         _global_emitter = None
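
The new module-level `cleanup_event_emitter()` gives callers one teardown entry point that both closes the emitter and resets the module's singleton. A sketch of how a caller might use it at shutdown (hypothetical usage; the import path follows the package layout shown in this diff):

```python
import asyncio

async def shutdown_example():
    from claude_mpm.services.monitor.event_emitter import (
        cleanup_event_emitter,
        get_event_emitter,
    )

    emitter = await get_event_emitter()
    # ... emit events during normal operation ...
    await cleanup_event_emitter()  # closes session/connector, clears the singleton

asyncio.run(shutdown_example())
```
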
claude_mpm/services/monitor/management/lifecycle.py CHANGED
@@ -48,6 +48,9 @@ class DaemonLifecycle:
              True if daemonization successful, False otherwise
          """
          try:
+             # Clean up any existing asyncio event loops before forking
+             self._cleanup_event_loops()
+
              # First fork
              pid = os.fork()
              if pid > 0:
@@ -76,7 +79,7 @@ class DaemonLifecycle:
              self._redirect_streams()

              # Write PID file
-             self._write_pid_file()
+             self.write_pid_file()

              # Setup signal handlers
              self._setup_signal_handlers()
@@ -110,7 +113,7 @@ class DaemonLifecycle:
          except Exception as e:
              self.logger.error(f"Error redirecting streams: {e}")

-     def _write_pid_file(self):
+     def write_pid_file(self):
          """Write PID to PID file."""
          try:
              # Ensure parent directory exists
@@ -259,6 +262,43 @@ class DaemonLifecycle:
          except Exception as e:
              self.logger.error(f"Error removing stale PID file: {e}")

+     def _cleanup_event_loops(self):
+         """Clean up any existing asyncio event loops before forking.
+
+         This prevents the 'I/O operation on closed kqueue object' error
+         that occurs when forked processes inherit event loops.
+         """
+         try:
+             import asyncio
+             import gc
+
+             # Try to get the current event loop
+             try:
+                 loop = asyncio.get_event_loop()
+                 if loop and loop.is_running():
+                     # Can't close a running loop, but we can stop it
+                     loop.stop()
+                     self.logger.debug("Stopped running event loop before fork")
+                 elif loop:
+                     # Close the loop if it exists and is not running
+                     loop.close()
+                     self.logger.debug("Closed event loop before fork")
+             except RuntimeError:
+                 # No event loop in current thread
+                 pass
+
+             # Clear the event loop policy to ensure clean state
+             asyncio.set_event_loop(None)
+
+             # Force garbage collection to clean up any loop resources
+             gc.collect()
+
+         except ImportError:
+             # asyncio not available (unlikely but handle it)
+             pass
+         except Exception as e:
+             self.logger.debug(f"Error cleaning up event loops before fork: {e}")
+
      def get_status(self) -> dict:
          """Get daemon status information.

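
The motivation for `_cleanup_event_loops()` is that a forked child inherits the parent's event loop along with its selector (kqueue on macOS, epoll on Linux) file descriptor, and using or closing it from both processes produces errors like "I/O operation on closed kqueue object". A minimal sketch of the safe pattern (illustrative only, not the library's code):

```python
import asyncio
import os

def fork_safely() -> int:
    # Drop any loop in the parent *before* forking so the child cannot
    # inherit a live selector file descriptor.
    try:
        loop = asyncio.get_event_loop()
        if not loop.is_running():
            loop.close()
    except RuntimeError:
        pass  # no loop in this thread
    asyncio.set_event_loop(None)

    pid = os.fork()
    if pid == 0:
        # Child: always build a fresh loop rather than reusing an inherited one.
        asyncio.set_event_loop(asyncio.new_event_loop())
    return pid
```
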
claude_mpm/services/monitor/server.py CHANGED
@@ -112,19 +112,61 @@ class UnifiedMonitorServer:

      def _run_server(self):
          """Run the server in its own event loop."""
+         loop = None
          try:
              # Create new event loop for this thread
-             self.loop = asyncio.new_event_loop()
-             asyncio.set_event_loop(self.loop)
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+             self.loop = loop

              # Run the async server
-             self.loop.run_until_complete(self._start_async_server())
+             loop.run_until_complete(self._start_async_server())

          except Exception as e:
              self.logger.error(f"Error in server thread: {e}")
          finally:
-             if self.loop:
-                 self.loop.close()
+             # Always ensure loop cleanup happens
+             if loop is not None:
+                 try:
+                     # Cancel all pending tasks first
+                     self._cancel_all_tasks(loop)
+
+                     # Give tasks a moment to cancel gracefully
+                     if not loop.is_closed():
+                         try:
+                             loop.run_until_complete(asyncio.sleep(0.1))
+                         except RuntimeError:
+                             # Loop might be stopped already, that's ok
+                             pass
+
+                 except Exception as e:
+                     self.logger.debug(f"Error during task cancellation: {e}")
+                 finally:
+                     try:
+                         # Clear the loop reference from the instance first
+                         self.loop = None
+
+                         # Stop the loop if it's still running
+                         if loop.is_running():
+                             loop.stop()
+
+                         # CRITICAL: Wait a moment for the loop to stop
+                         import time
+
+                         time.sleep(0.1)
+
+                         # Clear the event loop from the thread BEFORE closing
+                         # This prevents other code from accidentally using it
+                         asyncio.set_event_loop(None)
+
+                         # Now close the loop - this is critical to prevent the kqueue error
+                         if not loop.is_closed():
+                             loop.close()
+                             # Wait for the close to complete
+                             time.sleep(0.05)
+
+                     except Exception as e:
+                         self.logger.debug(f"Error during event loop cleanup: {e}")

      async def _start_async_server(self):
          """Start the async server components."""
@@ -392,12 +434,42 @@ class UnifiedMonitorServer:
          try:
              self.logger.info("Stopping unified monitor server")

+             # Signal shutdown first
              self.running = False

-             # Wait for server thread to finish
+             # If we have a loop, schedule the cleanup
+             if self.loop and not self.loop.is_closed():
+                 try:
+                     # Use call_soon_threadsafe to schedule cleanup from another thread
+                     future = asyncio.run_coroutine_threadsafe(
+                         self._graceful_shutdown(), self.loop
+                     )
+                     # Wait for cleanup to complete (with timeout)
+                     future.result(timeout=3)
+                 except Exception as e:
+                     self.logger.debug(f"Error during graceful shutdown: {e}")
+
+             # Wait for server thread to finish with a reasonable timeout
              if self.server_thread and self.server_thread.is_alive():
                  self.server_thread.join(timeout=5)

+                 # If thread is still alive after timeout, log a warning
+                 if self.server_thread.is_alive():
+                     self.logger.warning("Server thread did not stop within timeout")
+
+             # Clear all references to help with cleanup
+             self.server_thread = None
+             self.app = None
+             self.sio = None
+             self.runner = None
+             self.site = None
+             self.event_emitter = None
+
+             # Give the system a moment to cleanup resources
+             import time
+
+             time.sleep(0.2)
+
              self.logger.info("Unified monitor server stopped")

          except Exception as e:
@@ -406,20 +478,55 @@ class UnifiedMonitorServer:
      async def _cleanup_async(self):
          """Cleanup async resources."""
          try:
+             # Close the Socket.IO server first to stop accepting new connections
+             if self.sio:
+                 try:
+                     await self.sio.shutdown()
+                     self.logger.debug("Socket.IO shutdown complete")
+                 except Exception as e:
+                     self.logger.debug(f"Error shutting down Socket.IO: {e}")
+                 finally:
+                     self.sio = None
+
              # Cleanup event emitter
              if self.event_emitter:
                  try:
-                     self.event_emitter.unregister_socketio_server(self.sio)
-                     await self.event_emitter.close()
+                     if self.sio:
+                         self.event_emitter.unregister_socketio_server(self.sio)
+
+                     # Use the global cleanup function to ensure proper cleanup
+                     from .event_emitter import cleanup_event_emitter
+
+                     await cleanup_event_emitter()
+
                      self.logger.info("Event emitter cleaned up")
                  except Exception as e:
                      self.logger.warning(f"Error cleaning up event emitter: {e}")
+                 finally:
+                     self.event_emitter = None

+             # Stop the site (must be done before runner cleanup)
              if self.site:
-                 await self.site.stop()
+                 try:
+                     await self.site.stop()
+                     self.logger.debug("Site stopped")
+                 except Exception as e:
+                     self.logger.debug(f"Error stopping site: {e}")
+                 finally:
+                     self.site = None

+             # Cleanup the runner (after site is stopped)
              if self.runner:
-                 await self.runner.cleanup()
+                 try:
+                     await self.runner.cleanup()
+                     self.logger.debug("Runner cleaned up")
+                 except Exception as e:
+                     self.logger.debug(f"Error cleaning up runner: {e}")
+                 finally:
+                     self.runner = None
+
+             # Clear app reference
+             self.app = None

          except Exception as e:
              self.logger.error(f"Error during async cleanup: {e}")
@@ -440,3 +547,50 @@ class UnifiedMonitorServer:
              "hooks": self.hook_handler is not None,
          },
      }
+
+     def _cancel_all_tasks(self, loop=None):
+         """Cancel all pending tasks in the event loop."""
+         if loop is None:
+             loop = self.loop
+
+         if not loop or loop.is_closed():
+             return
+
+         try:
+             # Get all tasks in the loop
+             pending = asyncio.all_tasks(loop)
+
+             # Count tasks to cancel
+             tasks_to_cancel = [task for task in pending if not task.done()]
+
+             if tasks_to_cancel:
+                 # Cancel each task
+                 for task in tasks_to_cancel:
+                     task.cancel()
+
+                 # Wait for all tasks to complete cancellation
+                 gather = asyncio.gather(*tasks_to_cancel, return_exceptions=True)
+                 try:
+                     loop.run_until_complete(gather)
+                 except Exception:
+                     # Some tasks might fail to cancel, that's ok
+                     pass
+
+                 self.logger.debug(f"Cancelled {len(tasks_to_cancel)} pending tasks")
+         except Exception as e:
+             self.logger.debug(f"Error cancelling tasks: {e}")
+
+     async def _graceful_shutdown(self):
+         """Perform graceful shutdown of async resources."""
+         try:
+             # Stop accepting new connections
+             self.running = False
+
+             # Give ongoing operations a moment to complete
+             await asyncio.sleep(0.5)
+
+             # Then cleanup resources
+             await self._cleanup_async()
+
+         except Exception as e:
+             self.logger.debug(f"Error in graceful shutdown: {e}")
claude_mpm-4.2.13.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: claude-mpm
- Version: 4.2.11
+ Version: 4.2.13
  Summary: Claude Multi-Agent Project Manager - Orchestrate Claude with agent delegation and ticket tracking
  Author-email: Bob Matsuoka <bob@matsuoka.com>
  Maintainer: Claude MPM Team
claude_mpm-4.2.13.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
  claude_mpm/BUILD_NUMBER,sha256=toytnNjkIKPgQaGwDqQdC1rpNTAdSEc6Vja50d7Ovug,4
- claude_mpm/VERSION,sha256=cd55Cs9g014dKFhr97AzpoJW-fW8NPYxthFqB5LQaw8,7
+ claude_mpm/VERSION,sha256=5mfyStzIK-4pnKftSypHjTQDTxKee3RKFk0VR8PFewQ,7
  claude_mpm/__init__.py,sha256=lyTZAYGH4DTaFGLRNWJKk5Q5oTjzN5I6AXmfVX-Jff0,1512
  claude_mpm/__main__.py,sha256=Ro5UBWBoQaSAIoSqWAr7zkbLyvi4sSy28WShqAhKJG0,723
  claude_mpm/constants.py,sha256=I946iCQzIIPRZVVJ8aO7lA4euiyDnNw2IX7EelAOkIE,5915
@@ -83,7 +83,7 @@ claude_mpm/cli/commands/mcp_pipx_config.py,sha256=sE62VD6Q1CcO2k1nlbIhHMfAJFQTZf
  claude_mpm/cli/commands/mcp_server_commands.py,sha256=-1G_2Y5ScTvzDd-kY8fTAao2H6FH7DnsLimleF1rVqQ,6197
  claude_mpm/cli/commands/mcp_tool_commands.py,sha256=q17GzlFT3JiLTrDqwPO2tz1-fKmPO5QU449syTnKTz4,1283
  claude_mpm/cli/commands/memory.py,sha256=Yzfs3_oiKciv3sfOoDm2lJL4M9idG7ARV3-sNw1ge_g,26186
- claude_mpm/cli/commands/monitor.py,sha256=G6bFgiEvV5nE_bl1C3KnxmZXLfSvIR6UPwYTYR3CLFg,6769
+ claude_mpm/cli/commands/monitor.py,sha256=wdbQ-xMVvRNo0X64nZaweMG13_P8WErRZP3AKT02Hjs,7365
  claude_mpm/cli/commands/mpm_init.py,sha256=lO7N91ZHn_n18XbchUUcYoyme7L5NLcXVnhWm5F_Gq8,22367
  claude_mpm/cli/commands/mpm_init_handler.py,sha256=-pCB0XL3KipqGtnta8CC7Lg5TPMwstEhMFBcgF4aaa4,2919
  claude_mpm/cli/commands/run.py,sha256=qS3eolLiDrE8EXLQJioB6kL1ONr_l0c3OE3qMUJCqbA,43489
@@ -100,7 +100,7 @@ claude_mpm/cli/parsers/dashboard_parser.py,sha256=JBCM6v_iZhADr_Fwtk_d3up9AOod1a
  claude_mpm/cli/parsers/debug_parser.py,sha256=F7MZdmiXiPfiIPMv21ZUqB2cMT8Ho1LDmpbvCXODLbY,9846
  claude_mpm/cli/parsers/mcp_parser.py,sha256=zW4wClYOGf_o8yRediJkQRxgta2RI6S3IG_QDgAkp-k,5992
  claude_mpm/cli/parsers/memory_parser.py,sha256=ZwCDxJEgp-w03L-1tZsWTgisiwamP42s424bA5bvDJc,4760
- claude_mpm/cli/parsers/monitor_parser.py,sha256=Go78VOFQCts2fouv33tKfd303w0mVPpJiuBbRBGK5x0,4525
+ claude_mpm/cli/parsers/monitor_parser.py,sha256=PeoznSi_5Bw6THK_Espl8M20o6dKvvBSmFzAbovkaFQ,4920
  claude_mpm/cli/parsers/mpm_init_parser.py,sha256=PxAydGDwTmgi0lu-cQHLnk4286EBzK84StcT8Msvk0I,4791
  claude_mpm/cli/parsers/run_parser.py,sha256=bkVqtQDl0y0mQi6Ue8mSXNggUuPfXlywgyXLCOPccXg,4750
  claude_mpm/cli/parsers/tickets_parser.py,sha256=FYl-VNH7PrZzfZUCcjnf6F7g6JXnL8YDxwrmR5svIcg,6966
@@ -114,7 +114,7 @@ claude_mpm/cli_module/args.py,sha256=50_Y3AgMNeidtPjQ5-WZ1o-5Y7G2GAGQwMmllYjVScE
  claude_mpm/cli_module/commands.py,sha256=zZmjP4J19i9PNMA7gCI5Dl--R1u_H4sCt5ZtOadZkY4,7013
  claude_mpm/cli_module/migration_example.py,sha256=DtQ59RyoBD6r8FIfrjKXCQ8-xnUiOqP5McBiS6_W1Qc,5183
  claude_mpm/commands/__init__.py,sha256=paX5Ub5-UmRgiQ8UgKWIKwU2-RjLu67OmNJND-fVtjg,588
- claude_mpm/commands/mpm-agents.md,sha256=MxFYlIhvtsKwtUdGJROKTr2So-izaWGjwRkf5seSqXM,280
+ claude_mpm/commands/mpm-agents.md,sha256=JnYPJ-eWvIEEtiCB6iPu182P2xDBRvU3ArVXQ7h32kU,1341
  claude_mpm/commands/mpm-config.md,sha256=F-9kKUC30gT6QXvOr5OTpYOq6L97iA2-SXVGxXjyF8M,547
  claude_mpm/commands/mpm-doctor.md,sha256=ut5LhFKVRw-2ecjMSPsnaTiRuFXa6Q9t-Wgl3CCnQvk,590
  claude_mpm/commands/mpm-help.md,sha256=gUMhJ1aTWyNeNMLZJeDJMN2sPuL-ugUFalmrNDEP5bI,282
@@ -530,16 +530,16 @@ claude_mpm/services/memory/cache/__init__.py,sha256=6M6-P8ParyxX8vOgp_IxHgLMvacr
  claude_mpm/services/memory/cache/shared_prompt_cache.py,sha256=crnYPUT8zcS7TvoE1vW7pyaf4T77N5rJ1wUf_YQ2vvo,28704
  claude_mpm/services/memory/cache/simple_cache.py,sha256=qsTjbcsPxj-kNfaod9VN_uE5NioIwpfkUin_mMVUJCg,10218
  claude_mpm/services/monitor/__init__.py,sha256=X7gxSLUm9Fg_zEsX6LtCHP2ipF0qj6Emkun20h2So7g,745
- claude_mpm/services/monitor/daemon.py,sha256=YRwAZrunZ1jE-P61gNCkFLEys7mTkJLGBxUIH8H6upU,7830
- claude_mpm/services/monitor/event_emitter.py,sha256=D9o38lOFkzJx40RE0igvFVdAi1w5WqV_09Rz7nCjWdI,9609
- claude_mpm/services/monitor/server.py,sha256=i2mynduUMNAXU0h_SSz-cx7LaBaLrkVPgtGvKXGhQFE,16008
+ claude_mpm/services/monitor/daemon.py,sha256=9PyhEXpMUJ1LBmZdoLwzQmfjdCnXn8GeErywBnMScoY,12588
+ claude_mpm/services/monitor/event_emitter.py,sha256=JzRLNg8PUJ5s3ulNnq_D4yqCPItvidJzu8DmFxriieQ,12224
+ claude_mpm/services/monitor/server.py,sha256=fuefcZVqjEupyIKun1ElPw2mlEm2frfqh1iLcnB3af4,21911
  claude_mpm/services/monitor/handlers/__init__.py,sha256=rxPCb1jc4v0-ODRK4tTzO_IxWLlaG0d6ghF8yJgVvxw,656
  claude_mpm/services/monitor/handlers/code_analysis.py,sha256=mHyI27Wp6WVmUBc0m0i991ogyFZBTvkrfR7Kf3EAk5U,11474
  claude_mpm/services/monitor/handlers/dashboard.py,sha256=uGBhb-6RG6u4WLipUXgdx7RCW-vb_qek5dIfHIwAC7o,9805
  claude_mpm/services/monitor/handlers/hooks.py,sha256=s9CVlDLd-onH6WCOPeN06UjpEYVfuGjHw_icz220K_M,16533
  claude_mpm/services/monitor/management/__init__.py,sha256=mxaEFRgvvgV85gUpXu_DsnHtywihdP14EisvISAVZuQ,525
  claude_mpm/services/monitor/management/health.py,sha256=Wm92Cli_4cWD6B89KX_CdpAvvevuEaGB8Ah59ILhFww,3772
- claude_mpm/services/monitor/management/lifecycle.py,sha256=T7-NF1vous250B7gkEEuaKzzxkhnhn4M3vlXRadbOjE,9082
+ claude_mpm/services/monitor/management/lifecycle.py,sha256=Cahpc1-R09ihDYVWiMI9wnv-Qw20cNhHHcJyxZ9JcBo,10575
  claude_mpm/services/project/__init__.py,sha256=IUclN1L7ChHCNya7PJiVxu4nttxsrj3WRIpwyA1A_hw,512
  claude_mpm/services/project/analyzer.py,sha256=VHlLrP8-S5gr12w4Yzs7-6d7LWdJKISHPCFSG7SDiQU,38434
  claude_mpm/services/project/analyzer_refactored.py,sha256=USYEdPAhSoGPqZCpaT89Dw6ElFW_L1yXSURheQjAhLA,18243
@@ -620,9 +620,9 @@ claude_mpm/utils/subprocess_utils.py,sha256=zgiwLqh_17WxHpySvUPH65pb4bzIeUGOAYUJ
  claude_mpm/validation/__init__.py,sha256=YZhwE3mhit-lslvRLuwfX82xJ_k4haZeKmh4IWaVwtk,156
  claude_mpm/validation/agent_validator.py,sha256=3Lo6LK-Mw9IdnL_bd3zl_R6FkgSVDYKUUM7EeVVD3jc,20865
  claude_mpm/validation/frontmatter_validator.py,sha256=u8g4Eyd_9O6ugj7Un47oSGh3kqv4wMkuks2i_CtWRvM,7028
- claude_mpm-4.2.11.dist-info/licenses/LICENSE,sha256=lpaivOlPuBZW1ds05uQLJJswy8Rp_HMNieJEbFlqvLk,1072
- claude_mpm-4.2.11.dist-info/METADATA,sha256=bOfT109kHO6oXl8vnKSjnAw59js6kRdhHUpG5-2f3pg,13777
- claude_mpm-4.2.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- claude_mpm-4.2.11.dist-info/entry_points.txt,sha256=FDPZgz8JOvD-6iuXY2l9Zbo9zYVRuE4uz4Qr0vLeGOk,471
- claude_mpm-4.2.11.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
- claude_mpm-4.2.11.dist-info/RECORD,,
+ claude_mpm-4.2.13.dist-info/licenses/LICENSE,sha256=lpaivOlPuBZW1ds05uQLJJswy8Rp_HMNieJEbFlqvLk,1072
+ claude_mpm-4.2.13.dist-info/METADATA,sha256=8KwBn_hYIaHTcm94JC1lyY29sy8K2_PwOtmW_8PMbn4,13777
+ claude_mpm-4.2.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ claude_mpm-4.2.13.dist-info/entry_points.txt,sha256=FDPZgz8JOvD-6iuXY2l9Zbo9zYVRuE4uz4Qr0vLeGOk,471
+ claude_mpm-4.2.13.dist-info/top_level.txt,sha256=1nUg3FEaBySgm8t-s54jK5zoPnu3_eY6EP6IOlekyHA,11
+ claude_mpm-4.2.13.dist-info/RECORD,,