mcp-mesh 0.7.12__py3-none-any.whl → 0.7.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _mcp_mesh/__init__.py +1 -1
- _mcp_mesh/engine/__init__.py +1 -22
- _mcp_mesh/engine/async_mcp_client.py +88 -25
- _mcp_mesh/engine/decorator_registry.py +10 -9
- _mcp_mesh/engine/dependency_injector.py +64 -53
- _mcp_mesh/engine/mesh_llm_agent.py +119 -5
- _mcp_mesh/engine/mesh_llm_agent_injector.py +30 -0
- _mcp_mesh/engine/session_aware_client.py +3 -3
- _mcp_mesh/engine/unified_mcp_proxy.py +82 -90
- _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +0 -89
- _mcp_mesh/pipeline/api_heartbeat/api_fast_heartbeat_check.py +3 -3
- _mcp_mesh/pipeline/api_heartbeat/api_heartbeat_pipeline.py +30 -28
- _mcp_mesh/pipeline/mcp_heartbeat/dependency_resolution.py +16 -18
- _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +5 -5
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +3 -3
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +6 -6
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_send.py +1 -1
- _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +15 -11
- _mcp_mesh/pipeline/mcp_heartbeat/registry_connection.py +3 -3
- _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +37 -268
- _mcp_mesh/pipeline/mcp_startup/lifespan_factory.py +142 -0
- _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +57 -93
- _mcp_mesh/pipeline/shared/registry_connection.py +1 -1
- _mcp_mesh/shared/health_check_manager.py +313 -0
- _mcp_mesh/shared/logging_config.py +190 -7
- _mcp_mesh/shared/registry_client_wrapper.py +8 -8
- _mcp_mesh/shared/sse_parser.py +19 -17
- _mcp_mesh/tracing/execution_tracer.py +26 -1
- _mcp_mesh/tracing/fastapi_tracing_middleware.py +3 -4
- _mcp_mesh/tracing/trace_context_helper.py +25 -6
- {mcp_mesh-0.7.12.dist-info → mcp_mesh-0.7.13.dist-info}/METADATA +1 -1
- {mcp_mesh-0.7.12.dist-info → mcp_mesh-0.7.13.dist-info}/RECORD +38 -39
- mesh/__init__.py +3 -1
- mesh/decorators.py +81 -43
- mesh/helpers.py +72 -4
- mesh/types.py +48 -4
- _mcp_mesh/engine/full_mcp_proxy.py +0 -641
- _mcp_mesh/engine/mcp_client_proxy.py +0 -457
- _mcp_mesh/shared/health_check_cache.py +0 -246
- {mcp_mesh-0.7.12.dist-info → mcp_mesh-0.7.13.dist-info}/WHEEL +0 -0
- {mcp_mesh-0.7.12.dist-info → mcp_mesh-0.7.13.dist-info}/licenses/LICENSE +0 -0

_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py

@@ -234,171 +234,50 @@ class FastAPIServerSetupStep(PipelineStep):
     ) -> Any:
         """Create FastAPI application with proper FastMCP lifespan integration."""
         try:
-            import …
-            from contextlib import asynccontextmanager
+            from fastapi import FastAPI

-            from …
+            from .lifespan_factory import (
+                create_minimal_lifespan,
+                create_multiple_fastmcp_lifespan,
+                create_single_fastmcp_lifespan,
+            )

             agent_name = agent_config.get("name", "mcp-mesh-agent")
             agent_description = agent_config.get(
                 "description", "MCP Mesh Agent with FastAPI integration"
             )

-            # …
-            …
-            if len(…
-            …
-                if fastmcp_lifespan:
-
-                    @asynccontextmanager
-                    async def simple_fastmcp_lifespan(main_app):
-                        """Simple lifespan for single FastMCP server."""
-                        fastmcp_ctx = None
-                        try:
-                            fastmcp_ctx = fastmcp_lifespan(main_app)
-                            await fastmcp_ctx.__aenter__()
-                            self.logger.debug("Started FastMCP lifespan")
-                        except Exception as e:
-                            self.logger.error(f"Failed to start FastMCP lifespan: {e}")
-
-                        try:
-                            yield
-                        finally:
-                            # Graceful shutdown - unregister from registry
-                            await self._graceful_shutdown(main_app)
-
-                            # Clean up FastMCP lifespan
-                            if fastmcp_ctx:
-                                try:
-                                    await fastmcp_ctx.__aexit__(None, None, None)
-                                    self.logger.debug("FastMCP lifespan stopped")
-                                except Exception as e:
-                                    self.logger.warning(
-                                        f"Error closing FastMCP lifespan: {e}"
-                                    )
-
-                    primary_lifespan = simple_fastmcp_lifespan
-                else:
-                    primary_lifespan = None
-
-            elif len(fastmcp_servers) > 1:
-                # Multiple FastMCP servers - combine lifespans
-                self.logger.debug(
-                    "Creating combined lifespan for multiple FastMCP servers"
+            # Callback to get shutdown context at cleanup time
+            def get_shutdown_context():
+                return getattr(self, "_current_context", {})
+
+            # Collect FastMCP lifespans from pre-created wrappers
+            fastmcp_lifespans = []
+            for wrapper_data in (mcp_wrappers or {}).values():
+                mcp_wrapper = wrapper_data["wrapper"]
+                if (
+                    hasattr(mcp_wrapper, "_mcp_app")
+                    and mcp_wrapper._mcp_app
+                    and hasattr(mcp_wrapper._mcp_app, "lifespan")
+                ):
+                    fastmcp_lifespans.append(mcp_wrapper._mcp_app.lifespan)
+
+            # Select appropriate lifespan factory based on FastMCP server count
+            if len(fastmcp_lifespans) == 1:
+                self.logger.debug("Creating lifespan for single FastMCP server")
+                primary_lifespan = create_single_fastmcp_lifespan(
+                    fastmcp_lifespans[0], get_shutdown_context
                 )
-
-                # Collect FastMCP lifespans from pre-created wrappers
-                fastmcp_lifespans = []
-                for server_key, wrapper_data in mcp_wrappers.items():
-                    mcp_wrapper = wrapper_data["wrapper"]
-                    if (
-                        hasattr(mcp_wrapper, "_mcp_app")
-                        and mcp_wrapper._mcp_app
-                        and hasattr(mcp_wrapper._mcp_app, "lifespan")
-                    ):
-                        fastmcp_lifespans.append(mcp_wrapper._mcp_app.lifespan)
-                        self.logger.debug(
-                            f"Collected lifespan from FastMCP wrapper '{server_key}'"
-                        )
-
-                if fastmcp_lifespans:
-
-                    @asynccontextmanager
-                    async def multiple_fastmcp_lifespan(main_app):
-                        """Combined lifespan for multiple FastMCP servers."""
-                        lifespan_contexts = []
-                        for lifespan in fastmcp_lifespans:
-                            try:
-                                ctx = lifespan(main_app)
-                                await ctx.__aenter__()
-                                lifespan_contexts.append(ctx)
-                            except Exception as e:
-                                self.logger.error(
-                                    f"Failed to start FastMCP lifespan: {e}"
-                                )
-
-                        try:
-                            yield
-                        finally:
-                            # Registry cleanup using simple shutdown
-                            context = getattr(self, "_current_context", {})
-                            registry_url = context.get(
-                                "registry_url", "http://localhost:8001"
-                            )
-                            agent_id = context.get("agent_id", "unknown")
-
-                            try:
-                                from ...shared.simple_shutdown import (
-                                    _simple_shutdown_coordinator,
-                                )
-
-                                _simple_shutdown_coordinator.set_shutdown_context(
-                                    registry_url, agent_id
-                                )
-                                await _simple_shutdown_coordinator.perform_registry_cleanup()
-                            except Exception as e:
-                                self.logger.error(f"❌ Registry cleanup error: {e}")
-
-                            # Clean up all lifespans in reverse order
-                            for ctx in reversed(lifespan_contexts):
-                                try:
-                                    await ctx.__aexit__(None, None, None)
-                                except Exception as e:
-                                    self.logger.warning(
-                                        f"Error closing FastMCP lifespan: {e}"
-                                    )
-
-                    primary_lifespan = multiple_fastmcp_lifespan
-
-            # Add minimal graceful shutdown lifespan if no FastMCP lifespans found
-            if primary_lifespan is None:
+            elif len(fastmcp_lifespans) > 1:
                 self.logger.debug(
-                    "Creating …
+                    f"Creating lifespan for {len(fastmcp_lifespans)} FastMCP servers"
                 )
-
-                …
-                    finally:
-                        # Registry cleanup using simple shutdown
-                        context = getattr(self, "_current_context", {})
-                        registry_url = context.get(
-                            "registry_url", "http://localhost:8001"
-                        )
-                        agent_id = context.get("agent_id", "unknown")
-
-                        try:
-                            from ...shared.simple_shutdown import (
-                                _simple_shutdown_coordinator,
-                            )
-
-                            _simple_shutdown_coordinator.set_shutdown_context(
-                                registry_url, agent_id
-                            )
-                            await _simple_shutdown_coordinator.perform_registry_cleanup()
-                        except Exception as e:
-                            self.logger.error(f"❌ Registry cleanup error: {e}")
-
-                primary_lifespan = graceful_shutdown_only_lifespan
+                primary_lifespan = create_multiple_fastmcp_lifespan(
+                    fastmcp_lifespans, get_shutdown_context
+                )
+            else:
+                self.logger.debug("Creating minimal lifespan (no FastMCP servers)")
+                primary_lifespan = create_minimal_lifespan(get_shutdown_context)

             app = FastAPI(
                 title=f"MCP Mesh Agent: {agent_name}",
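
The three inline nested lifespans above collapse into calls to the new lifespan_factory module (shown in full at the end of this diff). One behavioral detail worth noting: the factories receive get_shutdown_context, a callback, rather than a snapshot of self._current_context, so registry_url and agent_id are read at shutdown time instead of being frozen at app-construction time. A minimal sketch of that late-binding pattern, independent of mcp-mesh (state and demo are illustrative names, not package code):

import asyncio
from contextlib import asynccontextmanager

# Illustrative stand-in for the pipeline step's _current_context; it is
# filled in *after* the lifespan is created, which is why the factories
# take a callback instead of concrete values.
state: dict = {}


def get_shutdown_context() -> dict:
    return state  # read at shutdown time, not at creation time


@asynccontextmanager
async def lifespan(app):
    try:
        yield
    finally:
        ctx = get_shutdown_context()
        # Sees the values stored during startup, not the empty initial dict.
        print("cleanup for", ctx.get("agent_id"), "via", ctx.get("registry_url"))


async def demo() -> None:
    async with lifespan(None):
        # Mimics _store_context_for_shutdown() running once registration succeeds.
        state.update(registry_url="http://localhost:8001", agent_id="agent-1")


asyncio.run(demo())

A related change in defaults: the old inline cleanup fell back to "http://localhost:8001" and agent_id "unknown", whereas the new _perform_registry_cleanup (see lifespan_factory.py below) skips registry cleanup entirely when either value is missing.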
@@ -451,7 +330,7 @@ class FastAPIServerSetupStep(PipelineStep):
             if health_check_fn:
                 # Use health check cache if configured
                 from ...engine.decorator_registry import DecoratorRegistry
-                from ...shared.…
+                from ...shared.health_check_manager import get_health_status_with_cache

                 health_status = await get_health_status_with_cache(
                     agent_id=agent_name,
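
The removed health_check_cache module (-246 lines in the file list) is superseded by the new health_check_manager (+313 lines); this hunk shows only the import swap, and get_health_status_with_cache keeps its name and its agent_id keyword across the move. The manager's actual signature is not visible in this diff, so the following is only a hypothetical sketch of the TTL-caching pattern such a helper implies; cached_health_status, check_fn, and ttl_seconds are invented names:

import time
from collections.abc import Awaitable, Callable
from typing import Any

# agent_id -> (monotonic timestamp, last health payload)
_cache: dict[str, tuple[float, dict[str, Any]]] = {}


async def cached_health_status(
    agent_id: str,
    check_fn: Callable[[], Awaitable[dict[str, Any]]],
    ttl_seconds: float = 5.0,
) -> dict[str, Any]:
    """Run the user health check at most once per TTL window per agent."""
    now = time.monotonic()
    hit = _cache.get(agent_id)
    if hit is not None and now - hit[0] < ttl_seconds:
        return hit[1]  # fresh enough: serve the cached payload
    result = await check_fn()
    _cache[agent_id] = (now, result)
    return result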
@@ -486,63 +365,8 @@ class FastAPIServerSetupStep(PipelineStep):

             await update_health_result()

-            # Note: /health …
-            # …
-
-            @app.get("/ready")
-            @app.head("/ready")
-            async def ready(response: Response):
-                """
-                Readiness check for Kubernetes.
-
-                Returns 200 when the service is ready to serve traffic.
-                Returns 503 when unhealthy - K8s will remove pod from service endpoints.
-                """
-                # Get health check result if available
-                from ...engine.decorator_registry import DecoratorRegistry
-
-                custom_health = DecoratorRegistry.get_health_check_result()
-
-                if custom_health:
-                    status = custom_health.get("status", "starting")
-                    if status == "healthy":
-                        response.status_code = 200
-                        return {
-                            "ready": True,
-                            "agent": agent_name,
-                            "status": status,
-                            "mcp_wrappers": len(mcp_wrappers),
-                            "timestamp": self._get_timestamp(),
-                        }
-                    else:
-                        # Not ready to serve traffic
-                        response.status_code = 503
-                        return {
-                            "ready": False,
-                            "agent": agent_name,
-                            "status": status,
-                            "reason": f"Service is {status}",
-                            "errors": custom_health.get("errors", []),
-                        }
-                else:
-                    # No custom health check - assume ready
-                    response.status_code = 200
-                    return {
-                        "ready": True,
-                        "agent": agent_name,
-                        "mcp_wrappers": len(mcp_wrappers),
-                        "timestamp": self._get_timestamp(),
-                    }
-
-            @app.get("/livez")
-            @app.head("/livez")
-            async def livez():
-                """Liveness check for Kubernetes."""
-                return {
-                    "alive": True,
-                    "agent": agent_name,
-                    "timestamp": self._get_timestamp(),
-                }
+            # Note: /health, /ready, /livez endpoints are registered by immediate uvicorn
+            # in decorators.py. They use health_check_manager to get stored health data.

             @app.get("/metrics")
             async def metrics():
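
The /ready and /livez handlers removed here implemented the standard Kubernetes probe contract: readiness flips to 503 so the pod is pulled from service endpoints, while liveness stays 200 as long as the process is responsive. Per the replacement note, these routes now live with the immediate uvicorn server in decorators.py and read stored health data via health_check_manager. A condensed, standalone sketch of the removed readiness pattern, with get_health_check_result standing in for the DecoratorRegistry lookup:

from fastapi import FastAPI, Response

app = FastAPI()


def get_health_check_result() -> dict | None:
    """Placeholder for DecoratorRegistry.get_health_check_result()."""
    return {"status": "healthy"}


@app.get("/ready")
@app.head("/ready")
async def ready(response: Response):
    health = get_health_check_result()
    if health is None:
        # No custom health check registered - assume ready.
        response.status_code = 200
        return {"ready": True}
    status = health.get("status", "starting")
    if status == "healthy":
        response.status_code = 200
        return {"ready": True, "status": status}
    # K8s removes the pod from service endpoints on 503.
    response.status_code = 503
    return {"ready": False, "status": status, "errors": health.get("errors", [])}

Note the default in the removed code: when no custom health check was registered, /ready answered 200 with ready: true, so agents without a health hook were always routable; the sketch preserves that behavior.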
@@ -773,61 +597,6 @@ mcp_mesh_up{{agent="{agent_name}"}} 1

         return datetime.now(UTC).isoformat()

-    async def _graceful_shutdown(self, main_app: Any) -> None:
-        """
-        Perform graceful shutdown by unregistering agent from registry.
-
-        Args:
-            main_app: FastAPI application instance (unused but required by lifespan signature)
-        """
-        try:
-            # Get pipeline context from the step execution context
-            # Note: We need to access the context from where this was called
-            context = getattr(self, "_current_context", {})
-
-            # Get registry configuration
-            registry_url = context.get("registry_url")
-            agent_id = context.get("agent_id")
-
-            if not registry_url or not agent_id:
-                self.logger.warning(
-                    f"🚨 Cannot perform graceful shutdown: missing registry_url={registry_url} or agent_id={agent_id}"
-                )
-                return
-
-            # Get or create registry client wrapper
-            registry_wrapper = context.get("registry_wrapper")
-            if not registry_wrapper:
-                # Create new registry client for shutdown
-                from ...generated.mcp_mesh_registry_client.api_client import ApiClient
-                from ...generated.mcp_mesh_registry_client.configuration import (
-                    Configuration,
-                )
-                from ...shared.registry_client_wrapper import RegistryClientWrapper
-                from ..registry_connection import RegistryConnectionStep
-
-                config = Configuration(host=registry_url)
-                api_client = ApiClient(configuration=config)
-                registry_wrapper = RegistryClientWrapper(api_client)
-                self.logger.debug(
-                    f"🔧 Created registry client for graceful shutdown: {registry_url}"
-                )
-
-            # Perform graceful unregistration
-            success = await registry_wrapper.unregister_agent(agent_id)
-            if success:
-                self.logger.info(
-                    f"🏁 Graceful shutdown completed for agent '{agent_id}'"
-                )
-            else:
-                self.logger.warning(
-                    f"⚠️ Graceful shutdown failed for agent '{agent_id}' - continuing shutdown"
-                )
-
-        except Exception as e:
-            # Don't fail the shutdown process due to unregistration errors
-            self.logger.error(f"❌ Graceful shutdown error: {e} - continuing shutdown")
-
     def _store_context_for_shutdown(self, context: dict[str, Any]) -> None:
         """Store context for access during shutdown."""
         # Store essential shutdown information
_mcp_mesh/pipeline/mcp_startup/lifespan_factory.py (new file)

@@ -0,0 +1,142 @@
+"""Factory functions for creating FastAPI lifespan context managers.
+
+Provides clean separation of lifespan creation logic from FastAPI app setup.
+Handles single FastMCP, multiple FastMCP, and minimal (no FastMCP) scenarios.
+"""
+
+import logging
+from collections.abc import Callable
+from contextlib import asynccontextmanager
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+
+async def _perform_registry_cleanup(
+    registry_url: str | None,
+    agent_id: str | None,
+) -> None:
+    """
+    Unregister agent from registry during shutdown.
+
+    Skips cleanup if registry_url or agent_id is missing - this indicates
+    the agent never connected to registry and is running in standalone mode.
+    """
+    if not registry_url or not agent_id or agent_id == "unknown":
+        logger.debug(
+            f"Skipping registry cleanup: registry_url={registry_url}, agent_id={agent_id}"
+        )
+        return
+
+    try:
+        from ...shared.simple_shutdown import _simple_shutdown_coordinator
+
+        _simple_shutdown_coordinator.set_shutdown_context(registry_url, agent_id)
+        await _simple_shutdown_coordinator.perform_registry_cleanup()
+    except Exception as e:
+        logger.error(f"Registry cleanup error: {e}")
+
+
+def create_single_fastmcp_lifespan(
+    fastmcp_lifespan: Callable,
+    get_shutdown_context: Callable[[], dict[str, Any]],
+) -> Callable:
+    """
+    Create lifespan for single FastMCP server.
+
+    Args:
+        fastmcp_lifespan: The lifespan context manager from FastMCP app
+        get_shutdown_context: Callback to get registry_url and agent_id at shutdown time
+    """
+
+    @asynccontextmanager
+    async def lifespan(app):
+        fastmcp_ctx = None
+        try:
+            fastmcp_ctx = fastmcp_lifespan(app)
+            await fastmcp_ctx.__aenter__()
+            logger.debug("Started FastMCP lifespan")
+        except Exception as e:
+            logger.error(f"Failed to start FastMCP lifespan: {e}")
+
+        try:
+            yield
+        finally:
+            ctx = get_shutdown_context()
+            await _perform_registry_cleanup(
+                ctx.get("registry_url"),
+                ctx.get("agent_id"),
+            )
+            if fastmcp_ctx:
+                try:
+                    await fastmcp_ctx.__aexit__(None, None, None)
+                    logger.debug("FastMCP lifespan stopped")
+                except Exception as e:
+                    logger.warning(f"Error closing FastMCP lifespan: {e}")
+
+    return lifespan
+
+
+def create_multiple_fastmcp_lifespan(
+    fastmcp_lifespans: list[Callable],
+    get_shutdown_context: Callable[[], dict[str, Any]],
+) -> Callable:
+    """
+    Create combined lifespan for multiple FastMCP servers.
+
+    Args:
+        fastmcp_lifespans: List of lifespan context managers from FastMCP apps
+        get_shutdown_context: Callback to get registry_url and agent_id at shutdown time
+    """
+
+    @asynccontextmanager
+    async def lifespan(app):
+        lifespan_contexts = []
+        for ls in fastmcp_lifespans:
+            try:
+                ctx = ls(app)
+                await ctx.__aenter__()
+                lifespan_contexts.append(ctx)
+            except Exception as e:
+                logger.error(f"Failed to start FastMCP lifespan: {e}")
+
+        try:
+            yield
+        finally:
+            ctx = get_shutdown_context()
+            await _perform_registry_cleanup(
+                ctx.get("registry_url"),
+                ctx.get("agent_id"),
+            )
+            # Exit in reverse order (LIFO) for proper cleanup
+            for lctx in reversed(lifespan_contexts):
+                try:
+                    await lctx.__aexit__(None, None, None)
+                except Exception as e:
+                    logger.warning(f"Error closing FastMCP lifespan: {e}")
+
+    return lifespan
+
+
+def create_minimal_lifespan(
+    get_shutdown_context: Callable[[], dict[str, Any]],
+) -> Callable:
+    """
+    Create minimal lifespan for graceful shutdown only (no FastMCP servers).
+
+    Args:
+        get_shutdown_context: Callback to get registry_url and agent_id at shutdown time
+    """
+
+    @asynccontextmanager
+    async def lifespan(app):
+        try:
+            yield
+        finally:
+            ctx = get_shutdown_context()
+            await _perform_registry_cleanup(
+                ctx.get("registry_url"),
+                ctx.get("agent_id"),
+            )
+
+    return lifespan
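
A minimal usage sketch of the new module, assuming the wheel is installed; create_minimal_lifespan and the module path come from this diff, while shutdown_state and the demo values are illustrative. FastAPI accepts the returned @asynccontextmanager function directly through its lifespan parameter:

from fastapi import FastAPI

from _mcp_mesh.pipeline.mcp_startup.lifespan_factory import create_minimal_lifespan

# Mutable holder standing in for the pipeline step's _current_context.
shutdown_state: dict = {}

app = FastAPI(lifespan=create_minimal_lifespan(lambda: shutdown_state))

# Once registration succeeds, the pipeline stores the shutdown context; the
# callback above will see these values when the server exits.
shutdown_state.update(registry_url="http://localhost:8001", agent_id="demo-agent")

Because the callback is evaluated inside the finally block, an agent that never registers leaves shutdown_state empty, and _perform_registry_cleanup skips the unregister call entirely.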