mcp-mesh 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _mcp_mesh/__init__.py +1 -1
- _mcp_mesh/engine/decorator_registry.py +26 -2
- _mcp_mesh/engine/dependency_injector.py +14 -1
- _mcp_mesh/engine/llm_config.py +11 -7
- _mcp_mesh/engine/mesh_llm_agent.py +247 -61
- _mcp_mesh/engine/mesh_llm_agent_injector.py +174 -0
- _mcp_mesh/engine/provider_handlers/__init__.py +20 -0
- _mcp_mesh/engine/provider_handlers/base_provider_handler.py +122 -0
- _mcp_mesh/engine/provider_handlers/claude_handler.py +138 -0
- _mcp_mesh/engine/provider_handlers/generic_handler.py +156 -0
- _mcp_mesh/engine/provider_handlers/openai_handler.py +163 -0
- _mcp_mesh/engine/provider_handlers/provider_handler_registry.py +167 -0
- _mcp_mesh/engine/response_parser.py +3 -38
- _mcp_mesh/engine/tool_schema_builder.py +3 -2
- _mcp_mesh/generated/.openapi-generator/FILES +3 -0
- _mcp_mesh/generated/.openapi-generator-ignore +0 -1
- _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +51 -97
- _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +42 -72
- _mcp_mesh/generated/mcp_mesh_registry_client/models/agent_info.py +11 -1
- _mcp_mesh/generated/mcp_mesh_registry_client/models/dependency_resolution_info.py +108 -0
- _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_provider.py +95 -0
- _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +37 -58
- _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +32 -63
- _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +30 -29
- _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +41 -59
- _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +51 -98
- _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +70 -85
- _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +51 -84
- _mcp_mesh/generated/mcp_mesh_registry_client/models/resolved_llm_provider.py +112 -0
- _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +54 -21
- _mcp_mesh/pipeline/mcp_heartbeat/dependency_resolution.py +43 -26
- _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +3 -3
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +35 -10
- _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +1 -1
- _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +77 -39
- _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +118 -35
- _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +1 -1
- _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +48 -3
- _mcp_mesh/pipeline/mcp_startup/server_discovery.py +77 -48
- _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +2 -2
- _mcp_mesh/shared/health_check_cache.py +246 -0
- _mcp_mesh/shared/registry_client_wrapper.py +29 -2
- {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/METADATA +1 -1
- {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/RECORD +50 -39
- mesh/__init__.py +12 -2
- mesh/decorators.py +105 -39
- mesh/helpers.py +259 -0
- mesh/types.py +53 -4
- {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/WHEEL +0 -0
- {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.2.dist-info}/licenses/LICENSE +0 -0
_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py (removed lines are truncated in the source diff view; fragments like `self.logger.` appear as captured):

```diff
--- a/_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py
+++ b/_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py
@@ -54,7 +54,7 @@ class FastAPIServerSetupStep(PipelineStep):
 
         # Handle existing server case - mount FastMCP with proper lifespan integration
         if server_reuse and existing_server:
-            self.logger.
+            self.logger.debug(
                 "🔄 SERVER REUSE: Found existing server, will mount FastMCP with proper lifespan integration"
             )
             return await self._handle_existing_server(
@@ -103,7 +103,9 @@ class FastAPIServerSetupStep(PipelineStep):
             )
 
         # Add K8s health endpoints
-        self._add_k8s_endpoints(
+        await self._add_k8s_endpoints(
+            fastapi_app, agent_config, mcp_wrappers, context
+        )
 
         # Integrate MCP wrappers into the main FastAPI app
         for server_key, wrapper_data in mcp_wrappers.items():
@@ -235,7 +237,7 @@ class FastAPIServerSetupStep(PipelineStep):
         import asyncio
         from contextlib import asynccontextmanager
 
-        from fastapi import FastAPI
+        from fastapi import FastAPI, Response
 
         agent_name = agent_config.get("name", "mcp-mesh-agent")
         agent_description = agent_config.get(
@@ -420,34 +422,113 @@ class FastAPIServerSetupStep(PipelineStep):
         except ImportError as e:
             raise Exception(f"FastAPI not available: {e}")
 
-    def _add_k8s_endpoints(
-        self,
+    async def _add_k8s_endpoints(
+        self,
+        app: Any,
+        agent_config: dict[str, Any],
+        mcp_wrappers: dict[str, Any],
+        context: dict[str, Any],
     ) -> None:
-        """
+        """
+        Set up health check result updates for K8s endpoints.
+
+        Note: The /health endpoint is already registered by immediate uvicorn.
+        We just need to update the result it returns via DecoratorRegistry.
+        """
+        from fastapi import Response
+
         agent_name = agent_config.get("name", "mcp-mesh-agent")
+        health_check_fn = agent_config.get("health_check")
+        health_check_ttl = agent_config.get("health_check_ttl", 15)
+
+        # Create a background task to update health check results periodically
+        async def update_health_result():
+            """Update health check result in DecoratorRegistry."""
+            if health_check_fn:
+                # Use health check cache if configured
+                from ...engine.decorator_registry import DecoratorRegistry
+                from ...shared.health_check_cache import get_health_status_with_cache
+
+                health_status = await get_health_status_with_cache(
+                    agent_id=agent_name,
+                    health_check_fn=health_check_fn,
+                    agent_config=agent_config,
+                    startup_context=context,
+                    ttl=health_check_ttl,
+                )
 
-
-
-
-
-
-
-
-
-
+                result = {
+                    "status": health_status.status.value,
+                    "agent": agent_name,
+                    "checks": health_status.checks,
+                    "errors": health_status.errors,
+                    "timestamp": health_status.timestamp.isoformat(),
+                }
+            else:
+                # No health check configured - return default healthy status
+                result = {
+                    "status": "healthy",
+                    "agent": agent_name,
+                    "timestamp": self._get_timestamp(),
+                }
+
+            # Store result for /health endpoint to use
+            from ...engine.decorator_registry import DecoratorRegistry
+
+            DecoratorRegistry.store_health_check_result(result)
+
+        # Run once immediately to populate initial result
+        # We're already in an async context (called from execute()), so just await it
+
+        await update_health_result()
+
+        # Note: /health endpoint is already registered by immediate uvicorn
+        # It will call DecoratorRegistry.get_health_check_result() to get this data
 
         @app.get("/ready")
         @app.head("/ready")
-        async def ready():
-            """
-
-
-
-
-
-
-
-
+        async def ready(response: Response):
+            """
+            Readiness check for Kubernetes.
+
+            Returns 200 when the service is ready to serve traffic.
+            Returns 503 when unhealthy - K8s will remove pod from service endpoints.
+            """
+            # Get health check result if available
+            from ...engine.decorator_registry import DecoratorRegistry
+
+            custom_health = DecoratorRegistry.get_health_check_result()
+
+            if custom_health:
+                status = custom_health.get("status", "starting")
+                if status == "healthy":
+                    response.status_code = 200
+                    return {
+                        "ready": True,
+                        "agent": agent_name,
+                        "status": status,
+                        "mcp_wrappers": len(mcp_wrappers),
+                        "timestamp": self._get_timestamp(),
+                    }
+                else:
+                    # Not ready to serve traffic
+                    response.status_code = 503
+                    return {
+                        "ready": False,
+                        "agent": agent_name,
+                        "status": status,
+                        "reason": f"Service is {status}",
+                        "errors": custom_health.get("errors", []),
+                    }
+            else:
+                # No custom health check - assume ready
+                response.status_code = 200
+                return {
+                    "ready": True,
+                    "agent": agent_name,
+                    "mcp_wrappers": len(mcp_wrappers),
+                    "timestamp": self._get_timestamp(),
+                }
 
         @app.get("/livez")
         @app.head("/livez")
```
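The awaited `_add_k8s_endpoints` is backed by the new `_mcp_mesh/shared/health_check_cache.py` (+246 lines) through `get_health_status_with_cache`. As a rough sketch of that pattern — an assumption based on the call site above, not the module's actual code — a per-agent TTL cache runs the user's health-check coroutine at most once per window, so frequent kubelet probes don't re-trigger expensive checks. The `HealthStatus` shape mirrors the field accesses in the hunk (`status.value`, `checks`, `errors`, `timestamp`); everything else is illustrative:

```python
import asyncio
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum


class Status(Enum):
    HEALTHY = "healthy"
    UNHEALTHY = "unhealthy"


@dataclass
class HealthStatus:
    # Field names mirror the hunk: status.value, checks, errors, timestamp
    status: Status = Status.HEALTHY
    checks: dict = field(default_factory=dict)
    errors: list = field(default_factory=list)
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))


# agent_id -> (monotonic time of last run, last result)
_cache: dict[str, tuple[float, HealthStatus]] = {}


async def get_health_status_with_cache(
    agent_id: str, health_check_fn, ttl: float = 15.0
) -> HealthStatus:
    """Invoke health_check_fn at most once per `ttl` seconds per agent."""
    now = time.monotonic()
    hit = _cache.get(agent_id)
    if hit and now - hit[0] < ttl:
        return hit[1]  # still fresh - skip the (possibly expensive) check
    try:
        status = await health_check_fn()
    except Exception as exc:  # a failing check marks the agent unhealthy
        status = HealthStatus(status=Status.UNHEALTHY, errors=[str(exc)])
    _cache[agent_id] = (now, status)
    return status


async def demo() -> None:
    async def my_check() -> HealthStatus:
        return HealthStatus(checks={"db": "ok"})

    result = await get_health_status_with_cache("agent-1", my_check, ttl=15)
    print(result.status.value, result.checks)


if __name__ == "__main__":
    asyncio.run(demo())
```

The `/ready` handler then turns the cached status into probe semantics: a 503 makes Kubernetes drop the pod from Service endpoints without restarting it, while liveness stays on the separate `/livez` route.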
_mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py (continued):

```diff
@@ -771,7 +852,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
         mount FastMCP endpoints on it instead of starting a new server.
         """
         try:
-            self.logger.
+            self.logger.debug("🔄 SERVER REUSE: Mounting FastMCP on existing server")
 
             # Get the existing minimal FastAPI app that's already running
             existing_app = None
@@ -792,7 +873,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
         if not existing_app:
             raise ValueError("No existing FastAPI app found for server reuse")
 
-        self.logger.
+        self.logger.debug(
             f"🔄 SERVER REUSE: Using existing FastAPI app '{existing_app.title}' for FastMCP mounting"
         )
 
@@ -805,7 +886,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
         mcp_wrappers = {}
         if fastmcp_servers:
             if fastmcp_lifespan and fastmcp_http_app:
-                self.logger.
+                self.logger.debug(
                     "✅ SERVER REUSE: FastMCP lifespan already integrated, mounting same HTTP app"
                 )
 
@@ -815,7 +896,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
                 # Mount the same FastMCP HTTP app that was used for lifespan integration
                 # This ensures the session manager is shared between lifespan and routes
                 existing_app.mount("", fastmcp_http_app)
-                self.logger.
+                self.logger.debug(
                     f"🔌 SERVER REUSE: Mounted FastMCP server '{server_key}' using stored HTTP app (lifespan already integrated)"
                 )
 
@@ -833,7 +914,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
                         f"Failed to mount server '{server_key}': {e}"
                     )
             else:
-                self.logger.
+                self.logger.debug(
                     "🔄 SERVER REUSE: No FastMCP lifespan integrated, using HttpMcpWrapper"
                 )
 
@@ -850,7 +931,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
                     if mcp_wrapper._mcp_app:
                         # Mount at root since FastMCP creates its own /mcp routes internally
                         existing_app.mount("", mcp_wrapper._mcp_app)
-                        self.logger.
+                        self.logger.debug(
                             f"🔌 SERVER REUSE: Mounted FastMCP server '{server_key}' via HttpMcpWrapper at root (provides /mcp routes)"
                         )
 
@@ -868,10 +949,12 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
             )
 
         # Add K8s health endpoints to existing app (if not already present)
-        self._add_k8s_endpoints(
+        await self._add_k8s_endpoints(
+            existing_app, agent_config, mcp_wrappers, context
+        )
 
         # FastMCP servers are already mounted directly - no additional integration needed
-        self.logger.
+        self.logger.debug(
             "🔌 SERVER REUSE: All FastMCP servers mounted successfully"
         )
 
@@ -889,7 +972,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
         self._current_context["mcp_wrappers"] = mcp_wrappers
 
         # FastMCP is now mounted directly - no server replacement needed
-        self.logger.
+        self.logger.debug(
             "🔄 SERVER REUSE: FastMCP routes mounted to existing app successfully"
         )
 
@@ -911,7 +994,7 @@ mcp_mesh_up{{agent="{agent_name}"}} 1
         )
 
         result.message = f"FastAPI app mounted on existing server {bind_host}:{bind_port} (external: {external_endpoint})"
-        self.logger.
+        self.logger.debug(
             f"✅ SERVER REUSE: FastMCP mounted on existing server with {len(mcp_wrappers)} MCP wrappers"
         )
 
```
_mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py:

```diff
--- a/_mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py
+++ b/_mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py
@@ -46,7 +46,7 @@ class FastMCPServerDiscoveryStep(PipelineStep):
             server_info.append(info)
             total_registered_functions += info.get("function_count", 0)
 
-            self.logger.
+            self.logger.debug(
                 f"📡 Discovered FastMCP server '{server_name}': "
                 f"{info.get('function_count', 0)} functions"
             )
```
_mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py:

```diff
--- a/_mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py
+++ b/_mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py
@@ -117,8 +117,9 @@ class HeartbeatPreparationStep(PipelineStep):
 
             # Check if this function has @mesh.llm decorator (Phase 3)
             llm_filter_data = None
+            llm_provider_data = None
             llm_agents = DecoratorRegistry.get_mesh_llm_agents()
-            self.logger.
+            self.logger.debug(
                 f"🤖 Checking for LLM filter: function={func_name}, total_llm_agents_registered={len(llm_agents)}"
             )
 
@@ -147,21 +148,65 @@ class HeartbeatPreparationStep(PipelineStep):
                         "filter": normalized_filter,
                         "filter_mode": filter_mode,
                     }
-                    self.logger.
+                    self.logger.debug(
                         f"🤖 LLM filter found for {func_name}: {len(normalized_filter)} filters, mode={filter_mode}, raw_filter={raw_filter}"
                     )
+
+                    # Check if provider is a dict (mesh delegation mode - v0.6.1)
+                    # If so, add it as llm_provider field (NOT in dependencies array)
+                    provider = llm_metadata.config.get("provider")
+                    if isinstance(provider, dict):
+                        self.logger.debug(
+                            f"🔌 LLM provider is dict (mesh delegation) for {func_name}: {provider}"
+                        )
+                        # Set llm_provider field (separate from dependencies)
+                        # Registry will resolve this to an actual provider agent
+                        llm_provider_data = {
+                            "capability": provider.get("capability", "llm"),
+                            "tags": provider.get("tags", []),
+                            "version": provider.get("version", ""),
+                            "namespace": provider.get("namespace", "default"),
+                        }
+                        self.logger.debug(
+                            f"✅ LLM provider spec prepared for {func_name}: {llm_provider_data}"
+                        )
+
                     break
 
             # Build tool registration data
+            self.logger.debug(
+                f"Building tool_data for {func_name}, dependencies={dependencies}"
+            )
+            processed_deps = self._process_dependencies(dependencies)
+            self.logger.debug(
+                f"Processed dependencies for {func_name}: {processed_deps}"
+            )
+
+            # Extract kwargs (any extra fields not in standard set)
+            standard_fields = {
+                "capability",
+                "tags",
+                "version",
+                "description",
+                "dependencies",
+            }
+            kwargs_data = {
+                k: v for k, v in metadata.items() if k not in standard_fields
+            }
+
             tool_data = {
                 "function_name": func_name,
                 "capability": metadata.get("capability"),
                 "tags": metadata.get("tags", []),
                 "version": metadata.get("version", "1.0.0"),
                 "description": metadata.get("description"),
-                "dependencies":
+                "dependencies": processed_deps,
                 "input_schema": input_schema,  # Add inputSchema for LLM integration (Phase 2)
                 "llm_filter": llm_filter_data,  # Add LLM filter for LLM integration (Phase 3)
+                "llm_provider": llm_provider_data,  # Add LLM provider for mesh delegation (v0.6.1)
+                "kwargs": (
+                    kwargs_data if kwargs_data else None
+                ),  # Add kwargs for vendor and other metadata
             }
 
             # Add debug pointer information only if debug flag is enabled
```
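For context, the dict-valued `provider` originates from a `@mesh.llm` decorator call. The sketch below is hypothetical — only the `capability`/`tags`/`version`/`namespace` keys and their defaults (`"llm"`, `[]`, `""`, `"default"`) are taken from the hunk; the exact `mesh.llm` signature is an assumption:

```python
import mesh  # real package namespace; the exact @mesh.llm signature is assumed here


@mesh.llm(
    provider={
        # Keys and defaults mirror the hunk above.
        "capability": "llm",     # defaults to "llm" if omitted
        "tags": ["claude"],      # routing tags, defaults to []
        "namespace": "default",  # defaults to "default"
    },
)
def summarize(text: str) -> str:
    """Tool whose LLM calls are delegated to a mesh-resolved provider."""
    ...
```

At heartbeat time this becomes a top-level `llm_provider` field on the tool registration — deliberately kept out of the `dependencies` array — which the registry resolves to a concrete provider agent (see the new `resolved_llm_provider.py` model in the file list).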
_mcp_mesh/pipeline/mcp_startup/server_discovery.py:

```diff
--- a/_mcp_mesh/pipeline/mcp_startup/server_discovery.py
+++ b/_mcp_mesh/pipeline/mcp_startup/server_discovery.py
@@ -8,14 +8,14 @@ in @mesh.agent decorators to prevent Python interpreter shutdown.
 import logging
 from typing import Any, Dict, Optional
 
-from ..shared import PipelineResult, PipelineStatus, PipelineStep
 from ...shared.server_discovery import ServerDiscoveryUtil
+from ..shared import PipelineResult, PipelineStatus, PipelineStep
 
 
 class ServerDiscoveryStep(PipelineStep):
     """
     Discovers existing uvicorn servers that may be running.
-
+
     This step checks if there's already a uvicorn server running on the target port,
     which could happen when @mesh.agent(auto_run=True) starts an immediate uvicorn
     server to prevent Python interpreter shutdown.
@@ -39,77 +39,100 @@ class ServerDiscoveryStep(PipelineStep):
             agent_config = context.get("agent_config", {})
             target_port = agent_config.get("http_port", 8080)
             target_host = agent_config.get("http_host", "0.0.0.0")
-
-            self.logger.
+
+            self.logger.debug(
+                "🔍 DISCOVERY: Looking for immediate uvicorn server from DecoratorRegistry"
+            )
 
             # Check DecoratorRegistry for immediate uvicorn server (much more reliable)
             from ...engine.decorator_registry import DecoratorRegistry
+
             existing_server = DecoratorRegistry.get_immediate_uvicorn_server()
 
             # Debug: Show what we found
             if existing_server:
                 server_status = existing_server.get("status", "unknown")
                 server_type = existing_server.get("type", "unknown")
-                self.logger.
+                self.logger.debug(
+                    f"🔍 DISCOVERY: Found server - status='{server_status}', type='{server_type}'"
+                )
             else:
-                self.logger.
-
+                self.logger.debug(
+                    "🔍 DISCOVERY: No immediate uvicorn server found in registry"
+                )
+
             if existing_server:
                 # Found existing immediate uvicorn server
-                server_host = existing_server.get(
-                server_port = existing_server.get(
-
+                server_host = existing_server.get("host", "unknown")
+                server_port = existing_server.get("port", 0)
+
                 result.add_context("existing_server", existing_server)
                 result.add_context("server_reuse", True)
-
+
                 # Get the FastAPI app directly from server info
-                existing_app = existing_server.get(
+                existing_app = existing_server.get("app")
                 if existing_app:
                     app_info = {
-
-
-
-
-
+                        "instance": existing_app,
+                        "title": getattr(
+                            existing_app, "title", "MCP Mesh Agent (Starting)"
+                        ),
+                        "version": getattr(existing_app, "version", "unknown"),
+                        "object_id": id(existing_app),
+                        "type": "immediate_uvicorn",
                     }
                     result.add_context("existing_fastapi_app", app_info)
                     result.message = (
                         f"Found immediate uvicorn server on {server_host}:{server_port} "
                         f"with FastAPI app '{app_info.get('title', 'Unknown')}'"
                     )
-                    self.logger.
+                    self.logger.debug(
                         f"✅ DISCOVERY: Found immediate uvicorn server on {server_host}:{server_port} "
                         f"with FastAPI app '{app_info.get('title', 'Unknown')}'"
                     )
                 else:
                     result.message = f"Found immediate uvicorn server on {server_host}:{server_port} (no FastAPI app reference)"
-                    self.logger.warning(
-
+                    self.logger.warning(
+                        "⚠️ DISCOVERY: Found immediate uvicorn server but no FastAPI app reference"
+                    )
+
             else:
                 # No existing server found
                 result.add_context("existing_server", None)
                 result.add_context("server_reuse", False)
-                result.message =
-
+                result.message = (
+                    "No immediate uvicorn server found in DecoratorRegistry"
+                )
+                self.logger.info(
+                    "🔍 DISCOVERY: No immediate uvicorn server found - pipeline will start new server"
+                )
 
             # Only discover FastAPI apps if no immediate uvicorn server was found
             if not existing_server:
-                self.logger.debug(
+                self.logger.debug(
+                    "🔍 DISCOVERY: No immediate uvicorn server found, discovering FastAPI apps via garbage collection"
+                )
                 fastapi_apps = ServerDiscoveryUtil.discover_fastapi_instances()
                 result.add_context("discovered_fastapi_apps", fastapi_apps)
-
+
                 if fastapi_apps:
                     app_count = len(fastapi_apps)
                     result.message += f" | Discovered {app_count} FastAPI app(s)"
-                    self.logger.info(
-
+                    self.logger.info(
+                        f"📦 DISCOVERY: Discovered {app_count} FastAPI application(s) for potential mounting"
+                    )
+
                     # Log details about discovered apps
                     for app_id, app_info in fastapi_apps.items():
                         app_title = app_info.get("title", "Unknown")
                         route_count = len(app_info.get("routes", []))
-                        self.logger.debug(
+                        self.logger.debug(
+                            f"  📦 App '{app_title}' ({app_id}): {route_count} routes"
+                        )
                 else:
-                    self.logger.debug(
+                    self.logger.debug(
+                        "🔍 DISCOVERY: Using FastAPI app from immediate uvicorn server, skipping garbage collection discovery"
+                    )
 
         except Exception as e:
             result.status = PipelineStatus.FAILED
```
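The record consumed here is stored by the immediate uvicorn server that `@mesh.agent(auto_run=True)` launches. Its shape can be inferred from the `.get()` calls in the hunk; the producer side is not shown in this diff, so treat the sketch below and its field values as assumptions:

```python
from typing import Any


def make_immediate_server_record(app: Any, host: str, port: int) -> dict[str, Any]:
    """Shape inferred from DecoratorRegistry.get_immediate_uvicorn_server() consumers."""
    return {
        "type": "immediate_uvicorn",  # read via existing_server.get("type")
        "status": "running",          # the DebounceCoordinator hunk branches on this value
        "host": host,                 # read with default "unknown"
        "port": port,                 # read with default 0
        "app": app,                   # the minimal FastAPI app reused for mounting
    }
```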
_mcp_mesh/pipeline/mcp_startup/server_discovery.py (continued):

```diff
@@ -119,46 +142,52 @@ class ServerDiscoveryStep(PipelineStep):
 
         return result
 
-    def _find_associated_fastapi_app(
+    def _find_associated_fastapi_app(
+        self, server_info: dict[str, Any]
+    ) -> Optional[dict[str, Any]]:
         """
         Try to find the FastAPI app associated with the existing server.
-
+
         Args:
             server_info: Server information from discovery
-
+
         Returns:
             FastAPI app info if found, None otherwise
         """
         try:
             # Check if server info already has an app
-            if
-            app = server_info[
+            if "app" in server_info:
+                app = server_info["app"]
                 return {
-
-
-
-
-
+                    "instance": app,
+                    "title": getattr(app, "title", "Unknown"),
+                    "version": getattr(app, "version", "unknown"),
+                    "routes": ServerDiscoveryUtil._extract_route_info(app),
+                    "object_id": id(app),
                 }
-
+
             # If not, discover all FastAPI apps and try to match
             fastapi_apps = ServerDiscoveryUtil.discover_fastapi_instances()
-
+
             # For immediate uvicorn servers, look for apps with specific titles
             for app_id, app_info in fastapi_apps.items():
-                app_title = app_info.get(
-                if
+                app_title = app_info.get("title", "")
+                if "MCP Mesh Agent" in app_title and "Starting" in app_title:
                     # This looks like our immediate uvicorn app
-                    self.logger.debug(
+                    self.logger.debug(
+                        f"🔍 DISCOVERY: Found immediate uvicorn FastAPI app: {app_title}"
+                    )
                     return app_info
-
+
             # If no immediate uvicorn app found, return the first available app
             if fastapi_apps:
                 first_app = next(iter(fastapi_apps.values()))
-                self.logger.debug(
+                self.logger.debug(
+                    f"🔍 DISCOVERY: Using first available FastAPI app: {first_app.get('title', 'Unknown')}"
+                )
                 return first_app
-
+
         except Exception as e:
             self.logger.warning(f"Error finding associated FastAPI app: {e}")
-
-        return None
+
+        return None
```
_mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py:

```diff
--- a/_mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py
+++ b/_mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py
@@ -228,7 +228,7 @@ class DebounceCoordinator:
                 fastapi_app, binding_config
             )
         elif server_status == "running":
-            self.logger.
+            self.logger.debug(
                 "🔄 RUNNING SERVER: Server already running with proper lifecycle, pipeline skipping uvicorn.run()"
             )
             self.logger.info(
@@ -493,7 +493,7 @@ class MeshOrchestrator:
 
         This replaces the background polling with explicit execution.
        """
-        self.logger.
+        self.logger.debug(f"🚀 Starting single pipeline execution: {self.name}")
 
         result = await self.pipeline.execute()
 
```
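All of the hunks above live in `PipelineStep` subclasses. Reconstructed from usage alone (`result.add_context(...)`, `result.message`, `result.status = PipelineStatus.FAILED`, `await self.pipeline.execute()`), the step contract looks roughly like this; the real classes in `_mcp_mesh.pipeline` are richer than this sketch:

```python
import logging
from enum import Enum
from typing import Any


class PipelineStatus(Enum):
    SUCCESS = "success"
    FAILED = "failed"


class PipelineResult:
    """Carries a status, a human-readable message, and shared context."""

    def __init__(self, message: str = "") -> None:
        self.status = PipelineStatus.SUCCESS
        self.message = message
        self.context: dict[str, Any] = {}

    def add_context(self, key: str, value: Any) -> None:
        self.context[key] = value


class PipelineStep:
    """Base class: each step inspects the shared context and returns a result."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.logger = logging.getLogger(name)

    async def execute(self, context: dict[str, Any]) -> PipelineResult:
        raise NotImplementedError
```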