code-puppy 0.0.127__py3-none-any.whl → 0.0.129__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. code_puppy/__init__.py +1 -0
  2. code_puppy/agent.py +65 -69
  3. code_puppy/agents/agent_code_puppy.py +0 -3
  4. code_puppy/agents/runtime_manager.py +231 -0
  5. code_puppy/command_line/command_handler.py +56 -25
  6. code_puppy/command_line/mcp_commands.py +1298 -0
  7. code_puppy/command_line/meta_command_handler.py +3 -2
  8. code_puppy/command_line/model_picker_completion.py +21 -8
  9. code_puppy/http_utils.py +1 -1
  10. code_puppy/main.py +99 -158
  11. code_puppy/mcp/__init__.py +23 -0
  12. code_puppy/mcp/async_lifecycle.py +237 -0
  13. code_puppy/mcp/circuit_breaker.py +218 -0
  14. code_puppy/mcp/config_wizard.py +437 -0
  15. code_puppy/mcp/dashboard.py +291 -0
  16. code_puppy/mcp/error_isolation.py +360 -0
  17. code_puppy/mcp/examples/retry_example.py +208 -0
  18. code_puppy/mcp/health_monitor.py +549 -0
  19. code_puppy/mcp/managed_server.py +346 -0
  20. code_puppy/mcp/manager.py +701 -0
  21. code_puppy/mcp/registry.py +412 -0
  22. code_puppy/mcp/retry_manager.py +321 -0
  23. code_puppy/mcp/server_registry_catalog.py +751 -0
  24. code_puppy/mcp/status_tracker.py +355 -0
  25. code_puppy/messaging/spinner/textual_spinner.py +6 -2
  26. code_puppy/model_factory.py +19 -4
  27. code_puppy/models.json +8 -6
  28. code_puppy/tui/app.py +19 -27
  29. code_puppy/tui/tests/test_agent_command.py +22 -15
  30. {code_puppy-0.0.127.data → code_puppy-0.0.129.data}/data/code_puppy/models.json +8 -6
  31. {code_puppy-0.0.127.dist-info → code_puppy-0.0.129.dist-info}/METADATA +4 -3
  32. {code_puppy-0.0.127.dist-info → code_puppy-0.0.129.dist-info}/RECORD +35 -19
  33. {code_puppy-0.0.127.dist-info → code_puppy-0.0.129.dist-info}/WHEEL +0 -0
  34. {code_puppy-0.0.127.dist-info → code_puppy-0.0.129.dist-info}/entry_points.txt +0 -0
  35. {code_puppy-0.0.127.dist-info → code_puppy-0.0.129.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,701 @@
1
+ """
2
+ MCPManager - Central coordinator for all MCP server operations.
3
+
4
+ This module provides the main MCPManager class that coordinates all MCP server
5
+ operations while maintaining pydantic-ai compatibility. It serves as the central
6
+ point for managing servers, registering configurations, and providing servers
7
+ to agents.
8
+ """
9
+
10
+ import logging
11
+ from dataclasses import dataclass
12
+ from datetime import datetime
13
+ from typing import Dict, List, Optional, Union, Any
14
+ import asyncio
15
+
16
+ from pydantic_ai.mcp import MCPServerSSE, MCPServerStdio, MCPServerStreamableHTTP
17
+
18
+ from .managed_server import ManagedMCPServer, ServerConfig, ServerState
19
+ from .registry import ServerRegistry
20
+ from .status_tracker import ServerStatusTracker
21
+ from .async_lifecycle import get_lifecycle_manager
22
+
23
+ # Configure logging
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
@dataclass
class ServerInfo:
    """Snapshot of a registered MCP server's identity and runtime status.

    Built by ``MCPManager.list_servers()`` from the registry, the status
    tracker, and the managed server instance.
    """

    id: str  # unique server ID assigned by the registry
    name: str  # human-readable server name
    type: str  # transport type (e.g. "stdio", "sse", "http")
    enabled: bool  # whether the server is enabled for agent use
    state: ServerState  # current lifecycle state
    quarantined: bool  # True when the server has been quarantined
    uptime_seconds: Optional[float]  # seconds since start, None if unknown
    error_message: Optional[str]  # last recorded error message, if any
    health: Optional[Dict[str, Any]] = None  # health-check details, if tracked
    start_time: Optional[datetime] = None  # when the server last started, if tracked
    latency_ms: Optional[float] = None  # last measured latency, if tracked
41
+
42
+
43
+ class MCPManager:
44
+ """
45
+ Central coordinator for all MCP server operations.
46
+
47
+ This class manages the lifecycle of MCP servers while maintaining
48
+ 100% pydantic-ai compatibility. It coordinates between the registry,
49
+ status tracker, and managed servers to provide a unified interface
50
+ for server management.
51
+
52
+ The critical method get_servers_for_agent() returns actual pydantic-ai
53
+ server instances for use with Agent objects.
54
+
55
+ Example usage:
56
+ manager = get_mcp_manager()
57
+
58
+ # Register a server
59
+ config = ServerConfig(
60
+ id="", # Auto-generated
61
+ name="filesystem",
62
+ type="stdio",
63
+ config={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem"]}
64
+ )
65
+ server_id = manager.register_server(config)
66
+
67
+ # Get servers for agent use
68
+ servers = manager.get_servers_for_agent() # Returns actual pydantic-ai instances
69
+ """
70
+
71
+ def __init__(self):
72
+ """Initialize the MCP manager with all required components."""
73
+ # Initialize core components
74
+ self.registry = ServerRegistry()
75
+ self.status_tracker = ServerStatusTracker()
76
+
77
+ # Active managed servers (server_id -> ManagedMCPServer)
78
+ self._managed_servers: Dict[str, ManagedMCPServer] = {}
79
+
80
+ # Load existing servers from registry
81
+ self._initialize_servers()
82
+
83
+ logger.info("MCPManager initialized with core components")
84
+
85
+ def _initialize_servers(self) -> None:
86
+ """Initialize managed servers from registry configurations."""
87
+ configs = self.registry.list_all()
88
+ initialized_count = 0
89
+
90
+ for config in configs:
91
+ try:
92
+ managed_server = ManagedMCPServer(config)
93
+ self._managed_servers[config.id] = managed_server
94
+
95
+ # Update status tracker - always start as STOPPED
96
+ # Servers must be explicitly started with /mcp start
97
+ self.status_tracker.set_status(config.id, ServerState.STOPPED)
98
+
99
+ initialized_count += 1
100
+ logger.debug(f"Initialized managed server: {config.name} (ID: {config.id})")
101
+
102
+ except Exception as e:
103
+ logger.error(f"Failed to initialize server {config.name}: {e}")
104
+ # Update status tracker with error state
105
+ self.status_tracker.set_status(config.id, ServerState.ERROR)
106
+ self.status_tracker.record_event(
107
+ config.id,
108
+ "initialization_error",
109
+ {"error": str(e), "message": f"Failed to initialize: {e}"}
110
+ )
111
+
112
+ logger.info(f"Initialized {initialized_count} servers from registry")
113
+
114
+ def register_server(self, config: ServerConfig) -> str:
115
+ """
116
+ Register a new server configuration.
117
+
118
+ Args:
119
+ config: Server configuration to register
120
+
121
+ Returns:
122
+ Server ID of the registered server
123
+
124
+ Raises:
125
+ ValueError: If configuration is invalid or server already exists
126
+ Exception: If server initialization fails
127
+ """
128
+ # Register with registry (validates config and assigns ID)
129
+ server_id = self.registry.register(config)
130
+
131
+ try:
132
+ # Create managed server instance
133
+ managed_server = ManagedMCPServer(config)
134
+ self._managed_servers[server_id] = managed_server
135
+
136
+ # Update status tracker - always start as STOPPED
137
+ # Servers must be explicitly started with /mcp start
138
+ self.status_tracker.set_status(server_id, ServerState.STOPPED)
139
+
140
+ # Record registration event
141
+ self.status_tracker.record_event(
142
+ server_id,
143
+ "registered",
144
+ {"name": config.name, "type": config.type, "message": "Server registered successfully"}
145
+ )
146
+
147
+ logger.info(f"Successfully registered server: {config.name} (ID: {server_id})")
148
+ return server_id
149
+
150
+ except Exception as e:
151
+ # Remove from registry if initialization failed
152
+ self.registry.unregister(server_id)
153
+ logger.error(f"Failed to initialize registered server {config.name}: {e}")
154
+ raise
155
+
156
+ def get_servers_for_agent(self) -> List[Union[MCPServerSSE, MCPServerStdio, MCPServerStreamableHTTP]]:
157
+ """
158
+ Get pydantic-ai compatible servers for agent use.
159
+
160
+ This is the critical method that must return actual pydantic-ai server
161
+ instances (not wrappers). Only returns enabled, non-quarantined servers.
162
+ Handles errors gracefully by logging but not crashing.
163
+
164
+ Returns:
165
+ List of actual pydantic-ai MCP server instances ready for use
166
+ """
167
+ servers = []
168
+
169
+ for server_id, managed_server in self._managed_servers.items():
170
+ try:
171
+ # Only include enabled, non-quarantined servers
172
+ if managed_server.is_enabled() and not managed_server.is_quarantined():
173
+ # Get the actual pydantic-ai server instance
174
+ pydantic_server = managed_server.get_pydantic_server()
175
+ servers.append(pydantic_server)
176
+
177
+ logger.debug(f"Added server to agent list: {managed_server.config.name}")
178
+ else:
179
+ logger.debug(
180
+ f"Skipping server {managed_server.config.name}: "
181
+ f"enabled={managed_server.is_enabled()}, "
182
+ f"quarantined={managed_server.is_quarantined()}"
183
+ )
184
+
185
+ except Exception as e:
186
+ # Log error but don't crash - continue with other servers
187
+ logger.error(
188
+ f"Error getting server {managed_server.config.name} for agent: {e}"
189
+ )
190
+ # Record error event
191
+ self.status_tracker.record_event(
192
+ server_id,
193
+ "agent_access_error",
194
+ {"error": str(e), "message": f"Error accessing server for agent: {e}"}
195
+ )
196
+ continue
197
+
198
+ logger.debug(f"Returning {len(servers)} servers for agent use")
199
+ return servers
200
+
201
+ def get_server(self, server_id: str) -> Optional[ManagedMCPServer]:
202
+ """
203
+ Get managed server by ID.
204
+
205
+ Args:
206
+ server_id: ID of server to retrieve
207
+
208
+ Returns:
209
+ ManagedMCPServer instance if found, None otherwise
210
+ """
211
+ return self._managed_servers.get(server_id)
212
+
213
+ def get_server_by_name(self, name: str) -> Optional[ServerConfig]:
214
+ """
215
+ Get server configuration by name.
216
+
217
+ Args:
218
+ name: Name of server to retrieve
219
+
220
+ Returns:
221
+ ServerConfig if found, None otherwise
222
+ """
223
+ return self.registry.get_by_name(name)
224
+
225
+ def update_server(self, server_id: str, config: ServerConfig) -> bool:
226
+ """
227
+ Update server configuration.
228
+
229
+ Args:
230
+ server_id: ID of server to update
231
+ config: New configuration
232
+
233
+ Returns:
234
+ True if server was updated, False if not found
235
+ """
236
+ # Update in registry
237
+ if not self.registry.update(server_id, config):
238
+ return False
239
+
240
+ # Update managed server if it exists
241
+ managed_server = self._managed_servers.get(server_id)
242
+ if managed_server:
243
+ managed_server.config = config
244
+ # Clear cached server to force recreation on next use
245
+ managed_server.server = None
246
+ logger.info(f"Updated server configuration: {config.name}")
247
+
248
+ return True
249
+
250
+ def list_servers(self) -> List[ServerInfo]:
251
+ """
252
+ Get information about all registered servers.
253
+
254
+ Returns:
255
+ List of ServerInfo objects with current status
256
+ """
257
+ server_infos = []
258
+
259
+ for server_id, managed_server in self._managed_servers.items():
260
+ try:
261
+ status = managed_server.get_status()
262
+ uptime = self.status_tracker.get_uptime(server_id)
263
+ summary = self.status_tracker.get_server_summary(server_id)
264
+
265
+ # Get health information from metadata
266
+ health_info = self.status_tracker.get_metadata(server_id, "health")
267
+ if health_info is None:
268
+ # Create basic health info based on state
269
+ health_info = {
270
+ "is_healthy": status["state"] == "running",
271
+ "error": status.get("error_message")
272
+ }
273
+
274
+ # Get latency from metadata
275
+ latency_ms = self.status_tracker.get_metadata(server_id, "latency_ms")
276
+
277
+ server_info = ServerInfo(
278
+ id=server_id,
279
+ name=managed_server.config.name,
280
+ type=managed_server.config.type,
281
+ enabled=managed_server.is_enabled(),
282
+ state=ServerState(status["state"]),
283
+ quarantined=managed_server.is_quarantined(),
284
+ uptime_seconds=uptime.total_seconds() if uptime else None,
285
+ error_message=status.get("error_message"),
286
+ health=health_info,
287
+ start_time=summary.get("start_time"),
288
+ latency_ms=latency_ms
289
+ )
290
+
291
+ server_infos.append(server_info)
292
+
293
+ except Exception as e:
294
+ logger.error(f"Error getting info for server {server_id}: {e}")
295
+ # Create error info
296
+ config = self.registry.get(server_id)
297
+ if config:
298
+ server_info = ServerInfo(
299
+ id=server_id,
300
+ name=config.name,
301
+ type=config.type,
302
+ enabled=False,
303
+ state=ServerState.ERROR,
304
+ quarantined=False,
305
+ uptime_seconds=None,
306
+ error_message=str(e),
307
+ health={"is_healthy": False, "error": str(e)},
308
+ start_time=None,
309
+ latency_ms=None
310
+ )
311
+ server_infos.append(server_info)
312
+
313
+ return server_infos
314
+
315
+ async def start_server(self, server_id: str) -> bool:
316
+ """
317
+ Start a server (enable it and start the subprocess/connection).
318
+
319
+ This both enables the server for agent use AND starts the actual process.
320
+ For stdio servers, this starts the subprocess.
321
+ For SSE/HTTP servers, this establishes the connection.
322
+
323
+ Args:
324
+ server_id: ID of server to start
325
+
326
+ Returns:
327
+ True if server was started, False if not found or failed
328
+ """
329
+ managed_server = self._managed_servers.get(server_id)
330
+ if managed_server is None:
331
+ logger.warning(f"Attempted to start non-existent server: {server_id}")
332
+ return False
333
+
334
+ try:
335
+ # First enable the server
336
+ managed_server.enable()
337
+ self.status_tracker.set_status(server_id, ServerState.RUNNING)
338
+ self.status_tracker.record_start_time(server_id)
339
+
340
+ # Try to actually start it if we have an async context
341
+ try:
342
+ # Get the pydantic-ai server instance
343
+ pydantic_server = managed_server.get_pydantic_server()
344
+
345
+ # Start the server using the async lifecycle manager
346
+ lifecycle_mgr = get_lifecycle_manager()
347
+ started = await lifecycle_mgr.start_server(server_id, pydantic_server)
348
+
349
+ if started:
350
+ logger.info(f"Started server process: {managed_server.config.name} (ID: {server_id})")
351
+ self.status_tracker.record_event(
352
+ server_id,
353
+ "started",
354
+ {"message": "Server started and process running"}
355
+ )
356
+ else:
357
+ logger.warning(f"Could not start process for server {server_id}, but it's enabled")
358
+ self.status_tracker.record_event(
359
+ server_id,
360
+ "enabled",
361
+ {"message": "Server enabled (process will start when used)"}
362
+ )
363
+ except Exception as e:
364
+ # Process start failed, but server is still enabled
365
+ logger.warning(f"Could not start process for server {server_id}: {e}")
366
+ self.status_tracker.record_event(
367
+ server_id,
368
+ "enabled",
369
+ {"message": "Server enabled (process will start when used)"}
370
+ )
371
+
372
+ return True
373
+
374
+ except Exception as e:
375
+ logger.error(f"Failed to start server {server_id}: {e}")
376
+ self.status_tracker.set_status(server_id, ServerState.ERROR)
377
+ self.status_tracker.record_event(
378
+ server_id,
379
+ "start_error",
380
+ {"error": str(e), "message": f"Error starting server: {e}"}
381
+ )
382
+ return False
383
+
384
+ def start_server_sync(self, server_id: str) -> bool:
385
+ """
386
+ Synchronous wrapper for start_server.
387
+ """
388
+ try:
389
+ loop = asyncio.get_running_loop()
390
+ # We're in an async context, but we need to wait for completion
391
+ # Create a future and schedule the coroutine
392
+ import concurrent.futures
393
+
394
+ # Use run_in_executor to run the async function synchronously
395
+ async def run_async():
396
+ return await self.start_server(server_id)
397
+
398
+ # Schedule the task and wait briefly for it to complete
399
+ task = asyncio.create_task(run_async())
400
+
401
+ # Give it a moment to complete - this fixes the race condition
402
+ import time
403
+ time.sleep(0.1) # Small delay to let async tasks progress
404
+
405
+ # Check if task completed, if not, fall back to sync enable
406
+ if task.done():
407
+ try:
408
+ result = task.result()
409
+ return result
410
+ except Exception:
411
+ pass
412
+
413
+ # If async didn't complete, enable synchronously
414
+ managed_server = self._managed_servers.get(server_id)
415
+ if managed_server:
416
+ managed_server.enable()
417
+ self.status_tracker.set_status(server_id, ServerState.RUNNING)
418
+ self.status_tracker.record_start_time(server_id)
419
+ logger.info(f"Enabled server synchronously: {server_id}")
420
+ return True
421
+ return False
422
+
423
+ except RuntimeError:
424
+ # No async loop, just enable the server
425
+ managed_server = self._managed_servers.get(server_id)
426
+ if managed_server:
427
+ managed_server.enable()
428
+ self.status_tracker.set_status(server_id, ServerState.RUNNING)
429
+ self.status_tracker.record_start_time(server_id)
430
+ logger.info(f"Enabled server (no async context): {server_id}")
431
+ return True
432
+ return False
433
+
434
+ async def stop_server(self, server_id: str) -> bool:
435
+ """
436
+ Stop a server (disable it and stop the subprocess/connection).
437
+
438
+ This both disables the server AND stops any running process.
439
+ For stdio servers, this stops the subprocess.
440
+ For SSE/HTTP servers, this closes the connection.
441
+
442
+ Args:
443
+ server_id: ID of server to stop
444
+
445
+ Returns:
446
+ True if server was stopped, False if not found
447
+ """
448
+ managed_server = self._managed_servers.get(server_id)
449
+ if managed_server is None:
450
+ logger.warning(f"Attempted to stop non-existent server: {server_id}")
451
+ return False
452
+
453
+ try:
454
+ # First disable the server
455
+ managed_server.disable()
456
+ self.status_tracker.set_status(server_id, ServerState.STOPPED)
457
+ self.status_tracker.record_stop_time(server_id)
458
+
459
+ # Try to actually stop it if we have an async context
460
+ try:
461
+ # Stop the server using the async lifecycle manager
462
+ lifecycle_mgr = get_lifecycle_manager()
463
+ stopped = await lifecycle_mgr.stop_server(server_id)
464
+
465
+ if stopped:
466
+ logger.info(f"Stopped server process: {managed_server.config.name} (ID: {server_id})")
467
+ self.status_tracker.record_event(
468
+ server_id,
469
+ "stopped",
470
+ {"message": "Server stopped and process terminated"}
471
+ )
472
+ else:
473
+ logger.info(f"Server {server_id} disabled (no process was running)")
474
+ self.status_tracker.record_event(
475
+ server_id,
476
+ "disabled",
477
+ {"message": "Server disabled"}
478
+ )
479
+ except Exception as e:
480
+ # Process stop failed, but server is still disabled
481
+ logger.warning(f"Could not stop process for server {server_id}: {e}")
482
+ self.status_tracker.record_event(
483
+ server_id,
484
+ "disabled",
485
+ {"message": "Server disabled"}
486
+ )
487
+
488
+ return True
489
+
490
+ except Exception as e:
491
+ logger.error(f"Failed to stop server {server_id}: {e}")
492
+ self.status_tracker.record_event(
493
+ server_id,
494
+ "stop_error",
495
+ {"error": str(e), "message": f"Error stopping server: {e}"}
496
+ )
497
+ return False
498
+
499
+ def stop_server_sync(self, server_id: str) -> bool:
500
+ """
501
+ Synchronous wrapper for stop_server.
502
+ """
503
+ try:
504
+ loop = asyncio.get_running_loop()
505
+ # We're in an async context, but we need to wait for completion
506
+ async def run_async():
507
+ return await self.stop_server(server_id)
508
+
509
+ # Schedule the task and wait briefly for it to complete
510
+ task = asyncio.create_task(run_async())
511
+
512
+ # Give it a moment to complete - this fixes the race condition
513
+ import time
514
+ time.sleep(0.1) # Small delay to let async tasks progress
515
+
516
+ # Check if task completed, if not, fall back to sync disable
517
+ if task.done():
518
+ try:
519
+ result = task.result()
520
+ return result
521
+ except Exception:
522
+ pass
523
+
524
+ # If async didn't complete, disable synchronously
525
+ managed_server = self._managed_servers.get(server_id)
526
+ if managed_server:
527
+ managed_server.disable()
528
+ self.status_tracker.set_status(server_id, ServerState.STOPPED)
529
+ self.status_tracker.record_stop_time(server_id)
530
+ logger.info(f"Disabled server synchronously: {server_id}")
531
+ return True
532
+ return False
533
+
534
+ except RuntimeError:
535
+ # No async loop, just disable the server
536
+ managed_server = self._managed_servers.get(server_id)
537
+ if managed_server:
538
+ managed_server.disable()
539
+ self.status_tracker.set_status(server_id, ServerState.STOPPED)
540
+ self.status_tracker.record_stop_time(server_id)
541
+ logger.info(f"Disabled server (no async context): {server_id}")
542
+ return True
543
+ return False
544
+
545
+ def reload_server(self, server_id: str) -> bool:
546
+ """
547
+ Reload a server configuration.
548
+
549
+ Args:
550
+ server_id: ID of server to reload
551
+
552
+ Returns:
553
+ True if server was reloaded, False if not found or failed
554
+ """
555
+ config = self.registry.get(server_id)
556
+ if config is None:
557
+ logger.warning(f"Attempted to reload non-existent server: {server_id}")
558
+ return False
559
+
560
+ try:
561
+ # Remove old managed server
562
+ if server_id in self._managed_servers:
563
+ old_server = self._managed_servers[server_id]
564
+ logger.debug(f"Removing old server instance: {old_server.config.name}")
565
+ del self._managed_servers[server_id]
566
+
567
+ # Create new managed server
568
+ managed_server = ManagedMCPServer(config)
569
+ self._managed_servers[server_id] = managed_server
570
+
571
+ # Update status tracker - always start as STOPPED
572
+ # Servers must be explicitly started with /mcp start
573
+ self.status_tracker.set_status(server_id, ServerState.STOPPED)
574
+
575
+ # Record reload event
576
+ self.status_tracker.record_event(
577
+ server_id,
578
+ "reloaded",
579
+ {"message": "Server configuration reloaded"}
580
+ )
581
+
582
+ logger.info(f"Reloaded server: {config.name} (ID: {server_id})")
583
+ return True
584
+
585
+ except Exception as e:
586
+ logger.error(f"Failed to reload server {server_id}: {e}")
587
+ self.status_tracker.set_status(server_id, ServerState.ERROR)
588
+ self.status_tracker.record_event(
589
+ server_id,
590
+ "reload_error",
591
+ {"error": str(e), "message": f"Error reloading server: {e}"}
592
+ )
593
+ return False
594
+
595
+ def remove_server(self, server_id: str) -> bool:
596
+ """
597
+ Remove a server completely.
598
+
599
+ Args:
600
+ server_id: ID of server to remove
601
+
602
+ Returns:
603
+ True if server was removed, False if not found
604
+ """
605
+ # Get server name for logging
606
+ config = self.registry.get(server_id)
607
+ server_name = config.name if config else server_id
608
+
609
+ # Remove from registry
610
+ registry_removed = self.registry.unregister(server_id)
611
+
612
+ # Remove from managed servers
613
+ managed_removed = False
614
+ if server_id in self._managed_servers:
615
+ del self._managed_servers[server_id]
616
+ managed_removed = True
617
+
618
+ # Record removal event if server existed
619
+ if registry_removed or managed_removed:
620
+ self.status_tracker.record_event(
621
+ server_id,
622
+ "removed",
623
+ {"message": "Server removed"}
624
+ )
625
+ logger.info(f"Removed server: {server_name} (ID: {server_id})")
626
+ return True
627
+ else:
628
+ logger.warning(f"Attempted to remove non-existent server: {server_id}")
629
+ return False
630
+
631
+ def get_server_status(self, server_id: str) -> Dict[str, Any]:
632
+ """
633
+ Get comprehensive status for a server.
634
+
635
+ Args:
636
+ server_id: ID of server to get status for
637
+
638
+ Returns:
639
+ Dictionary containing comprehensive status information
640
+ """
641
+ # Get basic status from managed server
642
+ managed_server = self._managed_servers.get(server_id)
643
+ if managed_server is None:
644
+ return {
645
+ "server_id": server_id,
646
+ "exists": False,
647
+ "error": "Server not found"
648
+ }
649
+
650
+ try:
651
+ # Get status from managed server
652
+ status = managed_server.get_status()
653
+
654
+ # Add status tracker information
655
+ tracker_summary = self.status_tracker.get_server_summary(server_id)
656
+ recent_events = self.status_tracker.get_events(server_id, limit=5)
657
+
658
+ # Combine all information
659
+ comprehensive_status = {
660
+ **status, # Include all managed server status
661
+ "tracker_state": tracker_summary["state"],
662
+ "tracker_metadata": tracker_summary["metadata"],
663
+ "recent_events_count": tracker_summary["recent_events_count"],
664
+ "tracker_uptime": tracker_summary["uptime"],
665
+ "last_event_time": tracker_summary["last_event_time"],
666
+ "recent_events": [
667
+ {
668
+ "timestamp": event.timestamp.isoformat(),
669
+ "event_type": event.event_type,
670
+ "details": event.details
671
+ }
672
+ for event in recent_events
673
+ ]
674
+ }
675
+
676
+ return comprehensive_status
677
+
678
+ except Exception as e:
679
+ logger.error(f"Error getting status for server {server_id}: {e}")
680
+ return {
681
+ "server_id": server_id,
682
+ "exists": True,
683
+ "error": str(e)
684
+ }
685
+
686
+
687
# Process-wide singleton, created lazily by get_mcp_manager().
_manager_instance: Optional[MCPManager] = None


def get_mcp_manager() -> MCPManager:
    """
    Return the singleton MCPManager, creating it on first call.

    Returns:
        The global MCPManager instance
    """
    global _manager_instance
    if _manager_instance is None:
        _manager_instance = MCPManager()
    return _manager_instance