mcp_mesh-0.5.4-py3-none-any.whl → mcp_mesh-0.5.6-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- _mcp_mesh/__init__.py +5 -2
- _mcp_mesh/engine/decorator_registry.py +95 -0
- _mcp_mesh/engine/mcp_client_proxy.py +17 -7
- _mcp_mesh/engine/unified_mcp_proxy.py +43 -40
- _mcp_mesh/pipeline/api_startup/fastapi_discovery.py +4 -167
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +4 -0
- _mcp_mesh/pipeline/mcp_heartbeat/lifespan_integration.py +13 -0
- _mcp_mesh/pipeline/mcp_startup/__init__.py +2 -0
- _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +306 -163
- _mcp_mesh/pipeline/mcp_startup/server_discovery.py +164 -0
- _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +198 -160
- _mcp_mesh/pipeline/mcp_startup/startup_pipeline.py +7 -4
- _mcp_mesh/pipeline/shared/mesh_pipeline.py +4 -0
- _mcp_mesh/shared/server_discovery.py +312 -0
- _mcp_mesh/shared/simple_shutdown.py +217 -0
- {mcp_mesh-0.5.4.dist-info → mcp_mesh-0.5.6.dist-info}/METADATA +1 -1
- {mcp_mesh-0.5.4.dist-info → mcp_mesh-0.5.6.dist-info}/RECORD +20 -18
- mesh/decorators.py +303 -36
- _mcp_mesh/engine/threading_utils.py +0 -223
- {mcp_mesh-0.5.4.dist-info → mcp_mesh-0.5.6.dist-info}/WHEEL +0 -0
- {mcp_mesh-0.5.4.dist-info → mcp_mesh-0.5.6.dist-info}/licenses/LICENSE +0 -0
The hunks below are from `_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py` (+306 −163):

```diff
@@ -4,7 +4,7 @@ import os
 import socket
 import time
 from datetime import UTC, datetime
-from typing import Any
+from typing import Any, Optional
 
 from ..shared import PipelineResult, PipelineStatus, PipelineStep
 
```
```diff
@@ -36,6 +36,11 @@ class FastAPIServerSetupStep(PipelineStep):
         agent_config = context.get("agent_config", {})
         fastmcp_servers = context.get("fastmcp_servers", {})
 
+        # Check for existing server from ServerDiscoveryStep
+        existing_server = context.get("existing_server")
+        existing_fastapi_app = context.get("existing_fastapi_app")
+        server_reuse = context.get("server_reuse", False)
+
         # Check if HTTP transport is enabled
         if not self._is_http_enabled():
             result.status = PipelineStatus.SKIPPED
```
```diff
@@ -47,6 +52,22 @@
         binding_config = self._resolve_binding_config(agent_config)
         advertisement_config = self._resolve_advertisement_config(agent_config)
 
+        # Handle existing server case - mount FastMCP with proper lifespan integration
+        if server_reuse and existing_server:
+            self.logger.info(
+                "🔄 SERVER REUSE: Found existing server, will mount FastMCP with proper lifespan integration"
+            )
+            return await self._handle_existing_server(
+                context,
+                result,
+                existing_server,
+                existing_fastapi_app,
+                fastmcp_servers,
+                agent_config,
+                binding_config,
+                advertisement_config,
+            )
+
         # Get heartbeat config for lifespan integration
         heartbeat_config = context.get("heartbeat_config")
 
```
```diff
@@ -75,6 +96,8 @@ class FastAPIServerSetupStep(PipelineStep):
                 result.add_error(f"Failed to wrap server '{server_key}': {e}")
 
         # Create FastAPI application with proper FastMCP lifespan integration (AFTER wrappers)
+        # Store context for shutdown coordination
+        self._current_context = context
         fastapi_app = self._create_fastapi_app(
             agent_config, fastmcp_servers, heartbeat_config, mcp_wrappers
         )
```
```diff
@@ -116,6 +139,25 @@
         result.add_context("fastapi_binding_config", binding_config)
         result.add_context("fastapi_advertisement_config", advertisement_config)
 
+        # Set shutdown context for signal handlers with FastAPI app
+        try:
+            from mesh.decorators import set_shutdown_context
+
+            shutdown_context = {
+                "fastapi_app": fastapi_app,
+                "registry_url": context.get("registry_url"),
+                "agent_id": context.get("agent_id"),
+                "registry_wrapper": context.get("registry_wrapper"),
+            }
+            set_shutdown_context(shutdown_context)
+            self.logger.debug("🔧 Shutdown context set for signal handlers")
+        except Exception as e:
+            self.logger.warning(f"⚠️ Failed to set shutdown context: {e}")
+
+        # Pass through server reuse information to orchestrator
+        result.add_context("server_reused", server_reuse)
+        result.add_context("existing_server", existing_server)
+
         bind_host = binding_config["bind_host"]
         bind_port = binding_config["bind_port"]
         external_host = advertisement_config["external_host"]
```
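The `set_shutdown_context` import above points at `mesh/decorators.py`, which grows by roughly 300 lines in this release and now coordinates shutdown from signal handlers. The handler side is not shown in this section; below is a minimal sketch of how such a context holder and a SIGTERM handler could pair up. Only `set_shutdown_context` is named in the diff; the holder dict, event, and handler names are illustrative assumptions.

```python
import signal
import threading
from typing import Any, Optional

# Hypothetical module-level holder; only set_shutdown_context() appears in
# the diff, the rest of this sketch is assumed.
_shutdown_context: Optional[dict[str, Any]] = None
_shutdown_requested = threading.Event()

def set_shutdown_context(context: dict[str, Any]) -> None:
    """Record the live FastAPI app and registry details for shutdown time."""
    global _shutdown_context
    _shutdown_context = context

def _on_sigterm(signum: int, frame: Any) -> None:
    # Keep the handler tiny: flag the request and let the lifespan's
    # finally-block (which runs on server exit) do the registry cleanup.
    _shutdown_requested.set()

signal.signal(signal.SIGTERM, _on_sigterm)
```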
```diff
@@ -200,7 +242,7 @@ class FastAPIServerSetupStep(PipelineStep):
             "description", "MCP Mesh Agent with FastAPI integration"
         )
 
-        #
+        # Simplified lifespan - heartbeat now handled by daemon thread
         primary_lifespan = None
 
         # Helper function to get FastMCP lifespan from wrapper
```
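That replacement comment marks the release's main structural change: heartbeats move out of the ASGI lifespan (the `asyncio.create_task(heartbeat_task_fn(...))` plumbing removed in the next hunk) onto a daemon thread. A minimal sketch of that pattern, with `send_heartbeat` as a stand-in for whatever the reworked `heartbeat_orchestrator.py` actually calls:

```python
import threading
import time
from typing import Callable

def start_heartbeat_daemon(
    send_heartbeat: Callable[[], None], interval: float
) -> threading.Thread:
    """Run heartbeats on a daemon thread so they never block interpreter exit."""

    def _loop() -> None:
        while True:
            try:
                send_heartbeat()
            except Exception:
                pass  # one failed beat should not kill the loop
            time.sleep(interval)

    thread = threading.Thread(target=_loop, name="mesh-heartbeat", daemon=True)
    thread.start()
    return thread
```

Because the thread is a daemon, shutdown no longer needs the cancel-and-await dance the old lifespan performed on its heartbeat task.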
```diff
@@ -216,113 +258,46 @@ class FastAPIServerSetupStep(PipelineStep):
                 return mcp_wrapper._mcp_app.lifespan
             return None
 
-        if
-            # Single FastMCP server
-            self.logger.debug(
-                "Creating combined lifespan for single FastMCP server with heartbeat"
-            )
-
+        if len(fastmcp_servers) == 1:
+            # Single FastMCP server - simple lifespan with graceful shutdown
+            self.logger.debug("Creating simple lifespan for single FastMCP server")
             fastmcp_lifespan = get_fastmcp_lifespan()
-            if not fastmcp_lifespan:
-                self.logger.warning(
-                    "No FastMCP lifespan available for heartbeat combination"
-                )
 
-
-
-
-
-
-
-            if fastmcp_lifespan:
+            if fastmcp_lifespan:
+
+                @asynccontextmanager
+                async def simple_fastmcp_lifespan(main_app):
+                    """Simple lifespan for single FastMCP server."""
+                    fastmcp_ctx = None
                     try:
                         fastmcp_ctx = fastmcp_lifespan(main_app)
                         await fastmcp_ctx.__aenter__()
-                        self.logger.debug(
-                            "Started FastMCP lifespan with main app context"
-                        )
+                        self.logger.debug("Started FastMCP lifespan")
                     except Exception as e:
                         self.logger.error(f"Failed to start FastMCP lifespan: {e}")
 
-
-
-
-
-
-                            heartbeat_task_fn(heartbeat_config)
-                        )
-                        self.logger.info(
-                            f"💓 Started heartbeat task with {heartbeat_config['interval']}s interval"
-                        )
-
-                    try:
-                        yield
-                    finally:
-                        # Graceful shutdown - unregister from registry
-                        await self._graceful_shutdown(main_app)
-
-                        # Clean up heartbeat task
-                        if heartbeat_task:
-                            heartbeat_task.cancel()
-                            try:
-                                await heartbeat_task
-                            except asyncio.CancelledError:
-                                self.logger.info(
-                                    "🛑 Heartbeat task cancelled during shutdown"
-                                )
-
-                        # Clean up FastMCP lifespan
-                        if fastmcp_ctx:
-                            try:
-                                await fastmcp_ctx.__aexit__(None, None, None)
-                                self.logger.debug("FastMCP lifespan stopped")
-                            except Exception as e:
-                                self.logger.warning(
-                                    f"Error closing FastMCP lifespan: {e}"
-                                )
-
-            primary_lifespan = single_fastmcp_with_heartbeat_lifespan
-
-        elif heartbeat_config and len(fastmcp_servers) == 0:
-            # Heartbeat only - no FastMCP servers
-            self.logger.debug(
-                "Creating lifespan for heartbeat only (no FastMCP servers)"
-            )
-
-            @asynccontextmanager
-            async def heartbeat_only_lifespan(main_app):
-                """Lifespan manager for heartbeat only."""
-                # Start heartbeat task
-                heartbeat_task = None
-                heartbeat_task_fn = heartbeat_config.get("heartbeat_task_fn")
-                if heartbeat_task_fn:
-                    heartbeat_task = asyncio.create_task(
-                        heartbeat_task_fn(heartbeat_config)
-                    )
-                    self.logger.info(
-                        f"💓 Started heartbeat task with {heartbeat_config['interval']}s interval"
-                    )
-
-                try:
-                    yield
-                finally:
-                    # Graceful shutdown - unregister from registry
-                    await self._graceful_shutdown(main_app)
+                    try:
+                        yield
+                    finally:
+                        # Graceful shutdown - unregister from registry
+                        await self._graceful_shutdown(main_app)
 
-
-
-
-
-
-
-
-
-
+                        # Clean up FastMCP lifespan
+                        if fastmcp_ctx:
+                            try:
+                                await fastmcp_ctx.__aexit__(None, None, None)
+                                self.logger.debug("FastMCP lifespan stopped")
+                            except Exception as e:
+                                self.logger.warning(
+                                    f"Error closing FastMCP lifespan: {e}"
+                                )
 
-
+                primary_lifespan = simple_fastmcp_lifespan
+            else:
+                primary_lifespan = None
 
         elif len(fastmcp_servers) > 1:
-            # Multiple FastMCP servers -
+            # Multiple FastMCP servers - combine lifespans
             self.logger.debug(
                 "Creating combined lifespan for multiple FastMCP servers"
             )
```
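`simple_fastmcp_lifespan` drives the inner FastMCP lifespan by hand via `__aenter__`/`__aexit__` rather than `async with`, so startup and teardown can be split across the outer lifespan's two phases and given separate error handling. The pattern in isolation (generic names; a runnable sketch, not the mcp-mesh code):

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI

@asynccontextmanager
async def inner_lifespan(app):
    print("inner: startup")
    yield
    print("inner: shutdown")

@asynccontextmanager
async def outer_lifespan(app):
    ctx = inner_lifespan(app)
    await ctx.__aenter__()  # run the inner startup half now
    try:
        yield  # the application serves requests here
    finally:
        # run the inner shutdown half, even if the outer body failed
        await ctx.__aexit__(None, None, None)

app = FastAPI(lifespan=outer_lifespan)
```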
```diff
@@ -340,87 +315,57 @@ class FastAPIServerSetupStep(PipelineStep):
                     self.logger.debug(
                         f"Collected lifespan from FastMCP wrapper '{server_key}'"
                     )
-                else:
-                    self.logger.warning(
-                        f"No lifespan available from wrapper '{server_key}'"
-                    )
 
-
-            @asynccontextmanager
-            async def multiple_fastmcp_lifespan(main_app):
-                """Combined lifespan manager for multiple FastMCP servers."""
-                # Start all FastMCP lifespans
-                lifespan_contexts = []
-                for lifespan in fastmcp_lifespans:
-                    try:
-                        ctx = lifespan(main_app)
-                        await ctx.__aenter__()
-                        lifespan_contexts.append(ctx)
-                    except Exception as e:
-                        self.logger.error(f"Failed to start FastMCP lifespan: {e}")
+            if fastmcp_lifespans:
 
-
-
-
-
-
-
-                    # Clean up all lifespans in reverse order
-                    for ctx in reversed(lifespan_contexts):
+                @asynccontextmanager
+                async def multiple_fastmcp_lifespan(main_app):
+                    """Combined lifespan for multiple FastMCP servers."""
+                    lifespan_contexts = []
+                    for lifespan in fastmcp_lifespans:
                         try:
-
+                            ctx = lifespan(main_app)
+                            await ctx.__aenter__()
+                            lifespan_contexts.append(ctx)
                         except Exception as e:
-                            self.logger.
-                                f"
+                            self.logger.error(
+                                f"Failed to start FastMCP lifespan: {e}"
                             )
 
-            primary_lifespan = multiple_fastmcp_lifespan
-
-        elif len(fastmcp_servers) == 1:
-            # Single FastMCP server without heartbeat - wrap lifespan with graceful shutdown
-            self.logger.debug(
-                "Wrapping FastMCP lifespan for single server with graceful shutdown"
-            )
-            fastmcp_lifespan = get_fastmcp_lifespan()
-
-            if fastmcp_lifespan:
-                # Wrap the FastMCP lifespan with graceful shutdown
-                @asynccontextmanager
-                async def single_fastmcp_with_graceful_shutdown(main_app):
-                    """Lifespan wrapper for single FastMCP server with graceful shutdown."""
-                    # Start FastMCP lifespan
-                    fastmcp_ctx = None
-                    try:
-                        fastmcp_ctx = fastmcp_lifespan(main_app)
-                        await fastmcp_ctx.__aenter__()
-                        self.logger.debug("Started FastMCP lifespan")
-                    except Exception as e:
-                        self.logger.error(f"Failed to start FastMCP lifespan: {e}")
-
                     try:
                         yield
                     finally:
-                        #
-
+                        # Registry cleanup using simple shutdown
+                        context = getattr(self, "_current_context", {})
+                        registry_url = context.get(
+                            "registry_url", "http://localhost:8001"
+                        )
+                        agent_id = context.get("agent_id", "unknown")
 
-
-
+                        try:
+                            from ...shared.simple_shutdown import (
+                                _simple_shutdown_coordinator,
+                            )
+
+                            _simple_shutdown_coordinator.set_shutdown_context(
+                                registry_url, agent_id
+                            )
+                            await _simple_shutdown_coordinator.perform_registry_cleanup()
+                        except Exception as e:
+                            self.logger.error(f"❌ Registry cleanup error: {e}")
+
+                        # Clean up all lifespans in reverse order
+                        for ctx in reversed(lifespan_contexts):
                             try:
-                                await
-                                self.logger.debug("FastMCP lifespan stopped")
+                                await ctx.__aexit__(None, None, None)
                             except Exception as e:
                                 self.logger.warning(
                                     f"Error closing FastMCP lifespan: {e}"
                                 )
 
-            primary_lifespan =
-            else:
-                self.logger.warning(
-                    "No FastMCP lifespan available for single server"
-                )
-                primary_lifespan = None
+                primary_lifespan = multiple_fastmcp_lifespan
 
-        # Add minimal graceful shutdown lifespan if no
+        # Add minimal graceful shutdown lifespan if no FastMCP lifespans found
         if primary_lifespan is None:
             self.logger.debug(
                 "Creating minimal lifespan for graceful shutdown only"
```
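The enter-all-then-exit-in-reverse loop over `lifespan_contexts` is exactly what `contextlib.AsyncExitStack` automates. An equivalent combination built on it would also unwind the already-entered lifespans if a later one fails to start, rather than logging and continuing as the code above does (a sketch, not the mcp-mesh implementation):

```python
from contextlib import AsyncExitStack, asynccontextmanager

def combine_lifespans(lifespans):
    """Build one lifespan that enters many, exiting them in reverse order."""

    @asynccontextmanager
    async def combined(app):
        async with AsyncExitStack() as stack:
            for lifespan in lifespans:
                await stack.enter_async_context(lifespan(app))
            yield  # all inner lifespans are up while the app runs

    return combined
```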
```diff
@@ -432,8 +377,24 @@ class FastAPIServerSetupStep(PipelineStep):
                 try:
                     yield
                 finally:
-                    #
-
+                    # Registry cleanup using simple shutdown
+                    context = getattr(self, "_current_context", {})
+                    registry_url = context.get(
+                        "registry_url", "http://localhost:8001"
+                    )
+                    agent_id = context.get("agent_id", "unknown")
+
+                    try:
+                        from ...shared.simple_shutdown import (
+                            _simple_shutdown_coordinator,
+                        )
+
+                        _simple_shutdown_coordinator.set_shutdown_context(
+                            registry_url, agent_id
+                        )
+                        await _simple_shutdown_coordinator.perform_registry_cleanup()
+                    except Exception as e:
+                        self.logger.error(f"❌ Registry cleanup error: {e}")
 
             primary_lifespan = graceful_shutdown_only_lifespan
 
```
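Both cleanup blocks lean on `_simple_shutdown_coordinator` from the new `_mcp_mesh/shared/simple_shutdown.py` (+217 lines), whose body is not part of this section. Judging only from the two calls made here, the coordinator needs roughly the shape below; the idempotency guard and the HTTP unregister call are assumptions, not the actual implementation.

```python
import httpx

class SimpleShutdownCoordinator:
    """Hypothetical reconstruction from the two calls visible in the diff."""

    def __init__(self) -> None:
        self._registry_url: str | None = None
        self._agent_id: str | None = None
        self._done = False

    def set_shutdown_context(self, registry_url: str, agent_id: str) -> None:
        self._registry_url = registry_url
        self._agent_id = agent_id

    async def perform_registry_cleanup(self) -> None:
        # Guard so the lifespan and a signal handler can both call this safely.
        if self._done or not (self._registry_url and self._agent_id):
            return
        self._done = True
        async with httpx.AsyncClient(timeout=5.0) as client:
            # Endpoint path is an assumption; the real registry API may differ.
            await client.delete(f"{self._registry_url}/agents/{self._agent_id}")

_simple_shutdown_coordinator = SimpleShutdownCoordinator()
```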
```diff
@@ -446,6 +407,11 @@ class FastAPIServerSetupStep(PipelineStep):
             lifespan=primary_lifespan,
         )
 
+        # Registry cleanup is now integrated directly into the lifespan above
+
+        # Store app reference for global shutdown coordination
+        app.state.shutdown_step = self
+
         self.logger.debug(
             f"Created FastAPI app for agent '{agent_name}' with lifespan: {primary_lifespan is not None}"
         )
```
```diff
@@ -785,3 +751,180 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
             "agent_id": context.get("agent_id"),
             "registry_wrapper": context.get("registry_wrapper"),
         }
+
+    async def _handle_existing_server(
+        self,
+        context: dict[str, Any],
+        result: Any,
+        existing_server: dict[str, Any],
+        existing_fastapi_app: dict[str, Any],
+        fastmcp_servers: dict[str, Any],
+        agent_config: dict[str, Any],
+        binding_config: dict[str, Any],
+        advertisement_config: dict[str, Any],
+    ) -> Any:
+        """
+        Handle mounting FastMCP on existing uvicorn server.
+
+        This is used when ServerDiscoveryStep finds an existing uvicorn server
+        (e.g., started immediately in @mesh.agent decorator) and we need to
+        mount FastMCP endpoints on it instead of starting a new server.
+        """
+        try:
+            self.logger.info("🔄 SERVER REUSE: Mounting FastMCP on existing server")
+
+            # Get the existing minimal FastAPI app that's already running
+            existing_app = None
+            if existing_fastapi_app and "app" in existing_fastapi_app:
+                existing_app = existing_fastapi_app["app"]
+            elif existing_fastapi_app and "instance" in existing_fastapi_app:
+                existing_app = existing_fastapi_app["instance"]
+            elif existing_server and "app" in existing_server:
+                existing_app = existing_server["app"]
+            else:
+                # As fallback, try to get the app from DecoratorRegistry
+                from ...engine.decorator_registry import DecoratorRegistry
+
+                server_info = DecoratorRegistry.get_immediate_uvicorn_server()
+                if server_info and "app" in server_info:
+                    existing_app = server_info["app"]
+
+            if not existing_app:
+                raise ValueError("No existing FastAPI app found for server reuse")
+
+            self.logger.info(
+                f"🔄 SERVER REUSE: Using existing FastAPI app '{existing_app.title}' for FastMCP mounting"
+            )
+
+            # Check if FastMCP lifespan is already integrated with the FastAPI app
+            from ...engine.decorator_registry import DecoratorRegistry
+
+            fastmcp_lifespan = DecoratorRegistry.get_fastmcp_lifespan()
+            fastmcp_http_app = DecoratorRegistry.get_fastmcp_http_app()
+
+            mcp_wrappers = {}
+            if fastmcp_servers:
+                if fastmcp_lifespan and fastmcp_http_app:
+                    self.logger.info(
+                        "✅ SERVER REUSE: FastMCP lifespan already integrated, mounting same HTTP app"
+                    )
+
+                    # FastMCP lifespan is already integrated, mount the same HTTP app that was used for lifespan
+                    for server_key, server_instance in fastmcp_servers.items():
+                        try:
+                            # Mount the same FastMCP HTTP app that was used for lifespan integration
+                            # This ensures the session manager is shared between lifespan and routes
+                            existing_app.mount("", fastmcp_http_app)
+                            self.logger.info(
+                                f"🔌 SERVER REUSE: Mounted FastMCP server '{server_key}' using stored HTTP app (lifespan already integrated)"
+                            )
+
+                            mcp_wrappers[server_key] = {
+                                "fastmcp_app": fastmcp_http_app,
+                                "server_instance": server_instance,
+                                "lifespan_integrated": True,
+                            }
+
+                        except Exception as e:
+                            self.logger.error(
+                                f"❌ SERVER REUSE: Failed to mount FastMCP server '{server_key}': {e}"
+                            )
+                            result.add_error(
+                                f"Failed to mount server '{server_key}': {e}"
+                            )
+                else:
+                    self.logger.info(
+                        "🔄 SERVER REUSE: No FastMCP lifespan integrated, using HttpMcpWrapper"
+                    )
+
+                    # No lifespan integration, use HttpMcpWrapper (fallback method)
+                    for server_key, server_instance in fastmcp_servers.items():
+                        try:
+                            # Create HttpMcpWrapper for proper FastMCP app creation and session routing
+                            from ...engine.http_wrapper import HttpMcpWrapper
+
+                            mcp_wrapper = HttpMcpWrapper(server_instance)
+                            await mcp_wrapper.setup()
+
+                            # Mount using the wrapper's properly configured FastMCP app
+                            if mcp_wrapper._mcp_app:
+                                # Mount at root since FastMCP creates its own /mcp routes internally
+                                existing_app.mount("", mcp_wrapper._mcp_app)
+                                self.logger.info(
+                                    f"🔌 SERVER REUSE: Mounted FastMCP server '{server_key}' via HttpMcpWrapper at root (provides /mcp routes)"
+                                )
+
+                                mcp_wrappers[server_key] = {
+                                    "wrapper": mcp_wrapper,
+                                    "server_instance": server_instance,
+                                    "lifespan_integrated": False,
+                                }
+                        except Exception as e:
+                            self.logger.error(
+                                f"❌ SERVER REUSE: Failed to create HttpMcpWrapper for server '{server_key}': {e}"
+                            )
+                            result.add_error(
+                                f"Failed to wrap server '{server_key}': {e}"
+                            )
+
+            # Add K8s health endpoints to existing app (if not already present)
+            self._add_k8s_endpoints(existing_app, agent_config, mcp_wrappers)
+
+            # FastMCP servers are already mounted directly - no additional integration needed
+            self.logger.info(
+                "🔌 SERVER REUSE: All FastMCP servers mounted successfully"
+            )
+
+            # Store context for graceful shutdown access
+            self._store_context_for_shutdown(context)
+
+            # Store agent_id for metadata endpoint access
+            agent_id = context.get("agent_id")
+            if agent_id:
+                self._current_context = self._current_context or {}
+                self._current_context["agent_id"] = agent_id
+
+            # Store mcp_wrappers for session stats access
+            self._current_context = self._current_context or {}
+            self._current_context["mcp_wrappers"] = mcp_wrappers
+
+            # FastMCP is now mounted directly - no server replacement needed
+            self.logger.info(
+                "🔄 SERVER REUSE: FastMCP routes mounted to existing app successfully"
+            )
+
+            # Store results in context (existing app updated, server reused)
+            result.add_context("fastapi_app", existing_app)
+            result.add_context("mcp_wrappers", mcp_wrappers)
+            result.add_context("fastapi_binding_config", binding_config)
+            result.add_context("fastapi_advertisement_config", advertisement_config)
+            result.add_context(
+                "server_reused", True
+            )  # Flag to skip uvicorn.run() in orchestrator
+
+            bind_host = binding_config["bind_host"]
+            bind_port = binding_config["bind_port"]
+            external_host = advertisement_config["external_host"]
+            external_endpoint = (
+                advertisement_config.get("external_endpoint")
+                or f"http://{external_host}:{bind_port}"
+            )
+
+            result.message = f"FastAPI app mounted on existing server {bind_host}:{bind_port} (external: {external_endpoint})"
+            self.logger.info(
+                f"✅ SERVER REUSE: FastMCP mounted on existing server with {len(mcp_wrappers)} MCP wrappers"
+            )
+
+        except Exception as e:
+            self.logger.error(
+                f"❌ SERVER REUSE: Failed to mount on existing server: {e}"
+            )
+            result.status = (
+                result.PipelineStatus.FAILED
+                if hasattr(result, "PipelineStatus")
+                else "failed"
+            )
+            result.message = f"Server reuse failed: {e}"
+            result.add_error(str(e))
+
+        return result
```