code-puppy: code_puppy-0.0.135-py3-none-any.whl → code_puppy-0.0.137-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. code_puppy/agent.py +15 -17
  2. code_puppy/agents/agent_manager.py +320 -9
  3. code_puppy/agents/base_agent.py +58 -2
  4. code_puppy/agents/runtime_manager.py +68 -42
  5. code_puppy/command_line/command_handler.py +82 -33
  6. code_puppy/command_line/mcp/__init__.py +10 -0
  7. code_puppy/command_line/mcp/add_command.py +183 -0
  8. code_puppy/command_line/mcp/base.py +35 -0
  9. code_puppy/command_line/mcp/handler.py +133 -0
  10. code_puppy/command_line/mcp/help_command.py +146 -0
  11. code_puppy/command_line/mcp/install_command.py +176 -0
  12. code_puppy/command_line/mcp/list_command.py +94 -0
  13. code_puppy/command_line/mcp/logs_command.py +126 -0
  14. code_puppy/command_line/mcp/remove_command.py +82 -0
  15. code_puppy/command_line/mcp/restart_command.py +92 -0
  16. code_puppy/command_line/mcp/search_command.py +117 -0
  17. code_puppy/command_line/mcp/start_all_command.py +126 -0
  18. code_puppy/command_line/mcp/start_command.py +98 -0
  19. code_puppy/command_line/mcp/status_command.py +185 -0
  20. code_puppy/command_line/mcp/stop_all_command.py +109 -0
  21. code_puppy/command_line/mcp/stop_command.py +79 -0
  22. code_puppy/command_line/mcp/test_command.py +107 -0
  23. code_puppy/command_line/mcp/utils.py +129 -0
  24. code_puppy/command_line/mcp/wizard_utils.py +259 -0
  25. code_puppy/command_line/model_picker_completion.py +21 -4
  26. code_puppy/command_line/prompt_toolkit_completion.py +9 -0
  27. code_puppy/config.py +5 -5
  28. code_puppy/main.py +23 -17
  29. code_puppy/mcp/__init__.py +42 -16
  30. code_puppy/mcp/async_lifecycle.py +51 -49
  31. code_puppy/mcp/blocking_startup.py +125 -113
  32. code_puppy/mcp/captured_stdio_server.py +63 -70
  33. code_puppy/mcp/circuit_breaker.py +63 -47
  34. code_puppy/mcp/config_wizard.py +169 -136
  35. code_puppy/mcp/dashboard.py +79 -71
  36. code_puppy/mcp/error_isolation.py +147 -100
  37. code_puppy/mcp/examples/retry_example.py +55 -42
  38. code_puppy/mcp/health_monitor.py +152 -141
  39. code_puppy/mcp/managed_server.py +100 -93
  40. code_puppy/mcp/manager.py +168 -156
  41. code_puppy/mcp/registry.py +148 -110
  42. code_puppy/mcp/retry_manager.py +63 -61
  43. code_puppy/mcp/server_registry_catalog.py +271 -225
  44. code_puppy/mcp/status_tracker.py +80 -80
  45. code_puppy/mcp/system_tools.py +47 -52
  46. code_puppy/messaging/message_queue.py +20 -13
  47. code_puppy/messaging/renderers.py +30 -15
  48. code_puppy/state_management.py +103 -0
  49. code_puppy/tui/app.py +64 -7
  50. code_puppy/tui/components/chat_view.py +3 -3
  51. code_puppy/tui/components/human_input_modal.py +12 -8
  52. code_puppy/tui/screens/__init__.py +2 -2
  53. code_puppy/tui/screens/mcp_install_wizard.py +208 -179
  54. code_puppy/tui/tests/test_agent_command.py +3 -3
  55. {code_puppy-0.0.135.dist-info → code_puppy-0.0.137.dist-info}/METADATA +1 -1
  56. {code_puppy-0.0.135.dist-info → code_puppy-0.0.137.dist-info}/RECORD +60 -42
  57. code_puppy/command_line/mcp_commands.py +0 -1789
  58. {code_puppy-0.0.135.data → code_puppy-0.0.137.data}/data/code_puppy/models.json +0 -0
  59. {code_puppy-0.0.135.dist-info → code_puppy-0.0.137.dist-info}/WHEEL +0 -0
  60. {code_puppy-0.0.135.dist-info → code_puppy-0.0.137.dist-info}/entry_points.txt +0 -0
  61. {code_puppy-0.0.135.dist-info → code_puppy-0.0.137.dist-info}/licenses/LICENSE +0 -0
code_puppy/mcp/manager.py CHANGED
@@ -1,24 +1,24 @@
 """
 MCPManager - Central coordinator for all MCP server operations.
 
-This module provides the main MCPManager class that coordinates all MCP server 
+This module provides the main MCPManager class that coordinates all MCP server
 operations while maintaining pydantic-ai compatibility. It serves as the central
 point for managing servers, registering configurations, and providing servers
 to agents.
 """
 
+import asyncio
 import logging
 from dataclasses import dataclass
 from datetime import datetime
-from typing import Dict, List, Optional, Union, Any
-import asyncio
+from typing import Any, Dict, List, Optional, Union
 
 from pydantic_ai.mcp import MCPServerSSE, MCPServerStdio, MCPServerStreamableHTTP
 
+from .async_lifecycle import get_lifecycle_manager
 from .managed_server import ManagedMCPServer, ServerConfig, ServerState
 from .registry import ServerRegistry
 from .status_tracker import ServerStatusTracker
-from .async_lifecycle import get_lifecycle_manager
 
 # Configure logging
 logger = logging.getLogger(__name__)
@@ -27,6 +27,7 @@ logger = logging.getLogger(__name__)
 @dataclass
 class ServerInfo:
     """Information about a registered server."""
+
     id: str
     name: str
     type: str
@@ -43,18 +44,18 @@ class ServerInfo:
 class MCPManager:
     """
     Central coordinator for all MCP server operations.
-
+
     This class manages the lifecycle of MCP servers while maintaining
     100% pydantic-ai compatibility. It coordinates between the registry,
     status tracker, and managed servers to provide a unified interface
     for server management.
-
+
     The critical method get_servers_for_agent() returns actual pydantic-ai
     server instances for use with Agent objects.
-
+
     Example usage:
         manager = get_mcp_manager()
-
+
         # Register a server
        config = ServerConfig(
             id="",  # Auto-generated
@@ -63,42 +64,44 @@ class MCPManager:
             config={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem"]}
         )
         server_id = manager.register_server(config)
-
+
         # Get servers for agent use
         servers = manager.get_servers_for_agent()  # Returns actual pydantic-ai instances
     """
-
+
     def __init__(self):
         """Initialize the MCP manager with all required components."""
         # Initialize core components
         self.registry = ServerRegistry()
         self.status_tracker = ServerStatusTracker()
-
+
         # Active managed servers (server_id -> ManagedMCPServer)
         self._managed_servers: Dict[str, ManagedMCPServer] = {}
-
+
         # Load existing servers from registry
         self._initialize_servers()
-
+
         logger.info("MCPManager initialized with core components")
-
+
     def _initialize_servers(self) -> None:
         """Initialize managed servers from registry configurations."""
         configs = self.registry.list_all()
         initialized_count = 0
-
+
         for config in configs:
             try:
                 managed_server = ManagedMCPServer(config)
                 self._managed_servers[config.id] = managed_server
-
+
                 # Update status tracker - always start as STOPPED
                 # Servers must be explicitly started with /mcp start
                 self.status_tracker.set_status(config.id, ServerState.STOPPED)
-
+
                 initialized_count += 1
-                logger.debug(f"Initialized managed server: {config.name} (ID: {config.id})")
-
+                logger.debug(
+                    f"Initialized managed server: {config.name} (ID: {config.id})"
+                )
+
             except Exception as e:
                 logger.error(f"Failed to initialize server {config.name}: {e}")
                 # Update status tracker with error state
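To make the registration flow concrete, here is a minimal sketch assembled from the docstring example above. The import paths, the server name, and type="stdio" are assumptions (the hunk elides the name/type lines of the original example); the start-as-STOPPED behavior comes from _initialize_servers and register_server.

# Minimal registration sketch; import paths and name/type values are assumed.
from code_puppy.mcp.managed_server import ServerConfig
from code_puppy.mcp.manager import get_mcp_manager

manager = get_mcp_manager()
config = ServerConfig(
    id="",  # Auto-generated by the registry
    name="filesystem",  # hypothetical name
    type="stdio",  # assumed type for a command/args-based server
    config={"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem"]},
)
server_id = manager.register_server(config)
# The new server is registered as STOPPED; it must be started explicitly
# (e.g. with /mcp start) before its process runs.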
@@ -106,66 +109,74 @@ class MCPManager:
                 self.status_tracker.record_event(
                     config.id,
                     "initialization_error",
-                    {"error": str(e), "message": f"Failed to initialize: {e}"}
+                    {"error": str(e), "message": f"Failed to initialize: {e}"},
                 )
-
+
         logger.info(f"Initialized {initialized_count} servers from registry")
-
+
     def register_server(self, config: ServerConfig) -> str:
         """
         Register a new server configuration.
-
+
         Args:
             config: Server configuration to register
-
+
         Returns:
             Server ID of the registered server
-
+
         Raises:
             ValueError: If configuration is invalid or server already exists
             Exception: If server initialization fails
         """
         # Register with registry (validates config and assigns ID)
         server_id = self.registry.register(config)
-
+
         try:
             # Create managed server instance
             managed_server = ManagedMCPServer(config)
             self._managed_servers[server_id] = managed_server
-
+
             # Update status tracker - always start as STOPPED
             # Servers must be explicitly started with /mcp start
             self.status_tracker.set_status(server_id, ServerState.STOPPED)
-
+
             # Record registration event
             self.status_tracker.record_event(
                 server_id,
                 "registered",
-                {"name": config.name, "type": config.type, "message": "Server registered successfully"}
+                {
+                    "name": config.name,
+                    "type": config.type,
+                    "message": "Server registered successfully",
+                },
+            )
+
+            logger.info(
+                f"Successfully registered server: {config.name} (ID: {server_id})"
             )
-
-            logger.info(f"Successfully registered server: {config.name} (ID: {server_id})")
             return server_id
-
+
         except Exception as e:
             # Remove from registry if initialization failed
             self.registry.unregister(server_id)
             logger.error(f"Failed to initialize registered server {config.name}: {e}")
             raise
-
-    def get_servers_for_agent(self) -> List[Union[MCPServerSSE, MCPServerStdio, MCPServerStreamableHTTP]]:
+
+    def get_servers_for_agent(
+        self,
+    ) -> List[Union[MCPServerSSE, MCPServerStdio, MCPServerStreamableHTTP]]:
         """
         Get pydantic-ai compatible servers for agent use.
-
-        This is the critical method that must return actual pydantic-ai server 
+
+        This is the critical method that must return actual pydantic-ai server
         instances (not wrappers). Only returns enabled, non-quarantined servers.
         Handles errors gracefully by logging but not crashing.
-
+
         Returns:
             List of actual pydantic-ai MCP server instances ready for use
         """
         servers = []
-
+
         for server_id, managed_server in self._managed_servers.items():
             try:
                 # Only include enabled, non-quarantined servers
@@ -173,15 +184,17 @@ class MCPManager:
                     # Get the actual pydantic-ai server instance
                     pydantic_server = managed_server.get_pydantic_server()
                     servers.append(pydantic_server)
-
-                    logger.debug(f"Added server to agent list: {managed_server.config.name}")
+
+                    logger.debug(
+                        f"Added server to agent list: {managed_server.config.name}"
+                    )
                 else:
                     logger.debug(
                         f"Skipping server {managed_server.config.name}: "
                         f"enabled={managed_server.is_enabled()}, "
                         f"quarantined={managed_server.is_quarantined()}"
                     )
-
+
             except Exception as e:
                 # Log error but don't crash - continue with other servers
                 logger.error(
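Since get_servers_for_agent() is the bridge to pydantic-ai, a hedged sketch of the hand-off may help. It assumes a pydantic-ai version whose Agent constructor accepts an mcp_servers argument (newer releases route MCP servers through toolsets instead), and the model name is purely illustrative.

# Hedged sketch: hand the live pydantic-ai server instances to an Agent.
from pydantic_ai import Agent

from code_puppy.mcp.manager import get_mcp_manager

manager = get_mcp_manager()
servers = manager.get_servers_for_agent()  # enabled, non-quarantined servers only
agent = Agent("openai:gpt-4o", mcp_servers=servers)  # model name is illustrative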
@@ -191,52 +204,55 @@
                 self.status_tracker.record_event(
                     server_id,
                     "agent_access_error",
-                    {"error": str(e), "message": f"Error accessing server for agent: {e}"}
+                    {
+                        "error": str(e),
+                        "message": f"Error accessing server for agent: {e}",
+                    },
                 )
                 continue
-
+
         logger.debug(f"Returning {len(servers)} servers for agent use")
         return servers
-
+
     def get_server(self, server_id: str) -> Optional[ManagedMCPServer]:
         """
         Get managed server by ID.
-
+
         Args:
             server_id: ID of server to retrieve
-
+
         Returns:
             ManagedMCPServer instance if found, None otherwise
         """
         return self._managed_servers.get(server_id)
-
+
     def get_server_by_name(self, name: str) -> Optional[ServerConfig]:
         """
         Get server configuration by name.
-
+
         Args:
             name: Name of server to retrieve
-
+
         Returns:
             ServerConfig if found, None otherwise
         """
         return self.registry.get_by_name(name)
-
+
     def update_server(self, server_id: str, config: ServerConfig) -> bool:
         """
         Update server configuration.
-
+
         Args:
             server_id: ID of server to update
             config: New configuration
-
+
         Returns:
             True if server was updated, False if not found
         """
         # Update in registry
         if not self.registry.update(server_id, config):
             return False
-
+
         # Update managed server if it exists
         managed_server = self._managed_servers.get(server_id)
         if managed_server:
@@ -244,36 +260,36 @@
             # Clear cached server to force recreation on next use
             managed_server.server = None
             logger.info(f"Updated server configuration: {config.name}")
-
+
         return True
-
+
     def list_servers(self) -> List[ServerInfo]:
         """
         Get information about all registered servers.
-
+
         Returns:
             List of ServerInfo objects with current status
         """
         server_infos = []
-
+
         for server_id, managed_server in self._managed_servers.items():
             try:
                 status = managed_server.get_status()
                 uptime = self.status_tracker.get_uptime(server_id)
                 summary = self.status_tracker.get_server_summary(server_id)
-
+
                 # Get health information from metadata
                 health_info = self.status_tracker.get_metadata(server_id, "health")
                 if health_info is None:
                     # Create basic health info based on state
                     health_info = {
                         "is_healthy": status["state"] == "running",
-                        "error": status.get("error_message")
+                        "error": status.get("error_message"),
                     }
-
+
                 # Get latency from metadata
                 latency_ms = self.status_tracker.get_metadata(server_id, "latency_ms")
-
+
                 server_info = ServerInfo(
                     id=server_id,
                     name=managed_server.config.name,
@@ -285,11 +301,11 @@
                     error_message=status.get("error_message"),
                     health=health_info,
                     start_time=summary.get("start_time"),
-                    latency_ms=latency_ms
+                    latency_ms=latency_ms,
                 )
-
+
                 server_infos.append(server_info)
-
+
             except Exception as e:
                 logger.error(f"Error getting info for server {server_id}: {e}")
                 # Create error info
@@ -306,23 +322,23 @@
                     error_message=str(e),
                     health={"is_healthy": False, "error": str(e)},
                     start_time=None,
-                    latency_ms=None
+                    latency_ms=None,
                 )
                 server_infos.append(server_info)
-
+
         return server_infos
-
+
     async def start_server(self, server_id: str) -> bool:
         """
         Start a server (enable it and start the subprocess/connection).
-
+
         This both enables the server for agent use AND starts the actual process.
         For stdio servers, this starts the subprocess.
         For SSE/HTTP servers, this establishes the connection.
-
+
         Args:
             server_id: ID of server to start
-
+
         Returns:
             True if server was started, False if not found or failed
         """
@@ -330,35 +346,39 @@
         if managed_server is None:
             logger.warning(f"Attempted to start non-existent server: {server_id}")
             return False
-
+
         try:
             # First enable the server
             managed_server.enable()
             self.status_tracker.set_status(server_id, ServerState.RUNNING)
             self.status_tracker.record_start_time(server_id)
-
+
             # Try to actually start it if we have an async context
             try:
                 # Get the pydantic-ai server instance
                 pydantic_server = managed_server.get_pydantic_server()
-
+
                 # Start the server using the async lifecycle manager
                 lifecycle_mgr = get_lifecycle_manager()
                 started = await lifecycle_mgr.start_server(server_id, pydantic_server)
-
+
                 if started:
-                    logger.info(f"Started server process: {managed_server.config.name} (ID: {server_id})")
+                    logger.info(
+                        f"Started server process: {managed_server.config.name} (ID: {server_id})"
+                    )
                     self.status_tracker.record_event(
                         server_id,
                         "started",
-                        {"message": "Server started and process running"}
+                        {"message": "Server started and process running"},
                     )
                 else:
-                    logger.warning(f"Could not start process for server {server_id}, but it's enabled")
+                    logger.warning(
+                        f"Could not start process for server {server_id}, but it's enabled"
+                    )
                     self.status_tracker.record_event(
                         server_id,
                         "enabled",
-                        {"message": "Server enabled (process will start when used)"}
+                        {"message": "Server enabled (process will start when used)"},
                    )
             except Exception as e:
                 # Process start failed, but server is still enabled
@@ -366,42 +386,42 @@
                 self.status_tracker.record_event(
                     server_id,
                     "enabled",
-                    {"message": "Server enabled (process will start when used)"}
+                    {"message": "Server enabled (process will start when used)"},
                 )
-
+
             return True
-
+
         except Exception as e:
             logger.error(f"Failed to start server {server_id}: {e}")
             self.status_tracker.set_status(server_id, ServerState.ERROR)
             self.status_tracker.record_event(
                 server_id,
                 "start_error",
-                {"error": str(e), "message": f"Error starting server: {e}"}
+                {"error": str(e), "message": f"Error starting server: {e}"},
             )
             return False
-
+
     def start_server_sync(self, server_id: str) -> bool:
         """
         Synchronous wrapper for start_server.
         """
         try:
-            loop = asyncio.get_running_loop()
+            asyncio.get_running_loop()
             # We're in an async context, but we need to wait for completion
             # Create a future and schedule the coroutine
-            import concurrent.futures
-
+
             # Use run_in_executor to run the async function synchronously
             async def run_async():
                 return await self.start_server(server_id)
-
+
             # Schedule the task and wait briefly for it to complete
             task = asyncio.create_task(run_async())
-
+
             # Give it a moment to complete - this fixes the race condition
             import time
+
             time.sleep(0.1)  # Small delay to let async tasks progress
-
+
             # Check if task completed, if not, fall back to sync enable
             if task.done():
                 try:
@@ -409,7 +429,7 @@
                     return result
                 except Exception:
                     pass
-
+
             # If async didn't complete, enable synchronously
             managed_server = self._managed_servers.get(server_id)
             if managed_server:
@@ -419,7 +439,7 @@
                 logger.info(f"Enabled server synchronously: {server_id}")
                 return True
             return False
-
+
         except RuntimeError:
             # No async loop, just enable the server
             managed_server = self._managed_servers.get(server_id)
@@ -430,18 +450,18 @@
                 logger.info(f"Enabled server (no async context): {server_id}")
                 return True
             return False
-
+
     async def stop_server(self, server_id: str) -> bool:
         """
         Stop a server (disable it and stop the subprocess/connection).
-
+
         This both disables the server AND stops any running process.
         For stdio servers, this stops the subprocess.
         For SSE/HTTP servers, this closes the connection.
-
+
         Args:
             server_id: ID of server to stop
-
+
         Returns:
             True if server was stopped, False if not found
         """
@@ -449,70 +469,70 @@
         if managed_server is None:
             logger.warning(f"Attempted to stop non-existent server: {server_id}")
             return False
-
+
         try:
             # First disable the server
             managed_server.disable()
             self.status_tracker.set_status(server_id, ServerState.STOPPED)
             self.status_tracker.record_stop_time(server_id)
-
+
             # Try to actually stop it if we have an async context
             try:
                 # Stop the server using the async lifecycle manager
                 lifecycle_mgr = get_lifecycle_manager()
                 stopped = await lifecycle_mgr.stop_server(server_id)
-
+
                 if stopped:
-                    logger.info(f"Stopped server process: {managed_server.config.name} (ID: {server_id})")
+                    logger.info(
+                        f"Stopped server process: {managed_server.config.name} (ID: {server_id})"
+                    )
                     self.status_tracker.record_event(
                         server_id,
                         "stopped",
-                        {"message": "Server stopped and process terminated"}
+                        {"message": "Server stopped and process terminated"},
                     )
                 else:
                     logger.info(f"Server {server_id} disabled (no process was running)")
                     self.status_tracker.record_event(
-                        server_id,
-                        "disabled",
-                        {"message": "Server disabled"}
+                        server_id, "disabled", {"message": "Server disabled"}
                     )
             except Exception as e:
                 # Process stop failed, but server is still disabled
                 logger.warning(f"Could not stop process for server {server_id}: {e}")
                 self.status_tracker.record_event(
-                    server_id,
-                    "disabled",
-                    {"message": "Server disabled"}
+                    server_id, "disabled", {"message": "Server disabled"}
                 )
-
+
             return True
-
+
         except Exception as e:
             logger.error(f"Failed to stop server {server_id}: {e}")
             self.status_tracker.record_event(
                 server_id,
                 "stop_error",
-                {"error": str(e), "message": f"Error stopping server: {e}"}
+                {"error": str(e), "message": f"Error stopping server: {e}"},
             )
             return False
-
+
     def stop_server_sync(self, server_id: str) -> bool:
         """
         Synchronous wrapper for stop_server.
         """
         try:
-            loop = asyncio.get_running_loop()
+            asyncio.get_running_loop()
+
             # We're in an async context, but we need to wait for completion
             async def run_async():
                 return await self.stop_server(server_id)
-
+
             # Schedule the task and wait briefly for it to complete
             task = asyncio.create_task(run_async())
-
+
             # Give it a moment to complete - this fixes the race condition
             import time
+
             time.sleep(0.1)  # Small delay to let async tasks progress
-
+
             # Check if task completed, if not, fall back to sync disable
             if task.done():
                 try:
@@ -520,7 +540,7 @@
                     return result
                 except Exception:
                     pass
-
+
             # If async didn't complete, disable synchronously
             managed_server = self._managed_servers.get(server_id)
             if managed_server:
@@ -530,7 +550,7 @@
                 logger.info(f"Disabled server synchronously: {server_id}")
                 return True
             return False
-
+
         except RuntimeError:
             # No async loop, just disable the server
             managed_server = self._managed_servers.get(server_id)
@@ -541,14 +561,14 @@
                 logger.info(f"Disabled server (no async context): {server_id}")
                 return True
             return False
-
+
     def reload_server(self, server_id: str) -> bool:
         """
         Reload a server configuration.
-
+
         Args:
             server_id: ID of server to reload
-
+
         Returns:
             True if server was reloaded, False if not found or failed
         """
@@ -556,85 +576,81 @@
         if config is None:
             logger.warning(f"Attempted to reload non-existent server: {server_id}")
             return False
-
+
         try:
             # Remove old managed server
             if server_id in self._managed_servers:
                 old_server = self._managed_servers[server_id]
                 logger.debug(f"Removing old server instance: {old_server.config.name}")
                 del self._managed_servers[server_id]
-
+
             # Create new managed server
             managed_server = ManagedMCPServer(config)
             self._managed_servers[server_id] = managed_server
-
+
             # Update status tracker - always start as STOPPED
             # Servers must be explicitly started with /mcp start
             self.status_tracker.set_status(server_id, ServerState.STOPPED)
-
+
             # Record reload event
             self.status_tracker.record_event(
-                server_id,
-                "reloaded",
-                {"message": "Server configuration reloaded"}
+                server_id, "reloaded", {"message": "Server configuration reloaded"}
             )
-
+
             logger.info(f"Reloaded server: {config.name} (ID: {server_id})")
             return True
-
+
         except Exception as e:
             logger.error(f"Failed to reload server {server_id}: {e}")
             self.status_tracker.set_status(server_id, ServerState.ERROR)
             self.status_tracker.record_event(
                 server_id,
                 "reload_error",
-                {"error": str(e), "message": f"Error reloading server: {e}"}
+                {"error": str(e), "message": f"Error reloading server: {e}"},
             )
             return False
-
+
     def remove_server(self, server_id: str) -> bool:
         """
         Remove a server completely.
-
+
         Args:
             server_id: ID of server to remove
-
+
         Returns:
             True if server was removed, False if not found
         """
         # Get server name for logging
         config = self.registry.get(server_id)
         server_name = config.name if config else server_id
-
+
         # Remove from registry
         registry_removed = self.registry.unregister(server_id)
-
+
         # Remove from managed servers
         managed_removed = False
         if server_id in self._managed_servers:
             del self._managed_servers[server_id]
             managed_removed = True
-
+
         # Record removal event if server existed
         if registry_removed or managed_removed:
             self.status_tracker.record_event(
-                server_id,
-                "removed",
-                {"message": "Server removed"}
+                server_id, "removed", {"message": "Server removed"}
             )
             logger.info(f"Removed server: {server_name} (ID: {server_id})")
             return True
         else:
             logger.warning(f"Attempted to remove non-existent server: {server_id}")
             return False
-
+
     def get_server_status(self, server_id: str) -> Dict[str, Any]:
         """
         Get comprehensive status for a server.
-
+
         Args:
             server_id: ID of server to get status for
-
+
         Returns:
             Dictionary containing comprehensive status information
         """
@@ -644,17 +660,17 @@
             return {
                 "server_id": server_id,
                 "exists": False,
-                "error": "Server not found"
+                "error": "Server not found",
             }
-
+
         try:
             # Get status from managed server
             status = managed_server.get_status()
-
+
             # Add status tracker information
             tracker_summary = self.status_tracker.get_server_summary(server_id)
             recent_events = self.status_tracker.get_events(server_id, limit=5)
-
+
             # Combine all information
             comprehensive_status = {
                 **status,  # Include all managed server status
@@ -667,21 +683,17 @@
                     {
                         "timestamp": event.timestamp.isoformat(),
                         "event_type": event.event_type,
-                        "details": event.details
+                        "details": event.details,
                     }
                     for event in recent_events
-                ]
+                ],
             }
-
+
             return comprehensive_status
-
+
         except Exception as e:
             logger.error(f"Error getting status for server {server_id}: {e}")
-            return {
-                "server_id": server_id,
-                "exists": True,
-                "error": str(e)
-            }
+            return {"server_id": server_id, "exists": True, "error": str(e)}
 
 
 # Singleton instance
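A short sketch of consuming get_server_status(). Only keys visible in these hunks are used ("server_id", "exists", "error", plus "state" from the managed server's get_status() dict); the remaining keys of comprehensive_status are elided in the diff, so nothing is assumed about them.

# Hedged usage sketch; server_id comes from an earlier register_server() call.
info = get_mcp_manager().get_server_status(server_id)
if not info.get("exists", True) or info.get("error"):
    print(f"{info['server_id']}: {info.get('error')}")
else:
    print(f"{info['server_id']} is {info.get('state')}")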
@@ -691,11 +703,11 @@ _manager_instance: Optional[MCPManager] = None
 def get_mcp_manager() -> MCPManager:
     """
     Get the singleton MCPManager instance.
-
+
     Returns:
         The global MCPManager instance
     """
     global _manager_instance
     if _manager_instance is None:
         _manager_instance = MCPManager()
-    return _manager_instance
+    return _manager_instance
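The module-level singleton means every caller shares one registry and one status tracker, which keeps server state consistent across the command line, the TUI, and agent construction. A two-line check, assuming the same import path as in the sketches above:

from code_puppy.mcp.manager import get_mcp_manager

assert get_mcp_manager() is get_mcp_manager()  # one shared MCPManager instance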