mcp-mesh 0.5.7__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/base_injector.py +171 -0
  3. _mcp_mesh/engine/decorator_registry.py +136 -33
  4. _mcp_mesh/engine/dependency_injector.py +91 -18
  5. _mcp_mesh/engine/http_wrapper.py +5 -22
  6. _mcp_mesh/engine/llm_config.py +41 -0
  7. _mcp_mesh/engine/llm_errors.py +115 -0
  8. _mcp_mesh/engine/mesh_llm_agent.py +440 -0
  9. _mcp_mesh/engine/mesh_llm_agent_injector.py +487 -0
  10. _mcp_mesh/engine/response_parser.py +240 -0
  11. _mcp_mesh/engine/signature_analyzer.py +229 -99
  12. _mcp_mesh/engine/tool_executor.py +169 -0
  13. _mcp_mesh/engine/tool_schema_builder.py +125 -0
  14. _mcp_mesh/engine/unified_mcp_proxy.py +14 -12
  15. _mcp_mesh/generated/.openapi-generator/FILES +4 -0
  16. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +81 -44
  17. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +72 -35
  18. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +132 -0
  19. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +172 -0
  20. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +92 -0
  21. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +121 -0
  22. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +98 -51
  23. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +93 -44
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +84 -41
  25. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +9 -72
  26. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +6 -3
  27. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +222 -0
  28. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +7 -0
  29. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +65 -4
  30. _mcp_mesh/pipeline/mcp_startup/startup_pipeline.py +2 -2
  31. _mcp_mesh/shared/registry_client_wrapper.py +60 -4
  32. _mcp_mesh/utils/fastmcp_schema_extractor.py +476 -0
  33. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/METADATA +1 -1
  34. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/RECORD +39 -25
  35. mesh/__init__.py +8 -4
  36. mesh/decorators.py +344 -2
  37. mesh/types.py +145 -94
  38. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/WHEEL +0 -0
  39. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/licenses/LICENSE +0 -0
--- a/_mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py
+++ b/_mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py
@@ -413,85 +413,22 @@ class APIDependencyResolutionStep(PipelineStep):
         """
         Determine which proxy type to use for API route handlers.
 
-        For API services, we need to check the parameter types used in FastAPI route handlers
-        that depend on this capability. This is different from MCP tools because route handlers
-        are wrapped differently.
-
-        Logic:
-        1. Check if any API route handlers use McpAgent for this capability
-        2. If yes → use FullMCPProxy
-        3. Otherwise → use MCPClientProxy (for McpMeshAgent or untyped)
+        Since McpAgent has been removed, all API route handlers now use MCPClientProxy
+        for McpMeshAgent parameters.
 
         Args:
             capability: The capability name to check
             injector: The dependency injector instance
 
         Returns:
-            "FullMCPProxy" or "MCPClientProxy"
+            "MCPClientProxy"
         """
-        try:
-            # Get functions that depend on this capability
-            if capability not in injector._dependency_mapping:
-                self.logger.debug(
-                    f"🔍 No API route handlers depend on capability '{capability}', using MCPClientProxy"
-                )
-                return "MCPClientProxy"
-
-            affected_function_ids = injector._dependency_mapping[capability]
-
-            # Scan ALL route handlers to detect ANY McpAgent usage
-            mcpagent_functions = []
-            mcpmeshagent_functions = []
-
-            for func_id in affected_function_ids:
-                if func_id in injector._function_registry:
-                    wrapper_func = injector._function_registry[func_id]
-
-                    # Get stored parameter types from wrapper (same pattern as MCP)
-                    if hasattr(wrapper_func, "_mesh_parameter_types") and hasattr(
-                        wrapper_func, "_mesh_dependencies"
-                    ):
-                        parameter_types = wrapper_func._mesh_parameter_types
-                        dependencies = wrapper_func._mesh_dependencies
-                        mesh_positions = wrapper_func._mesh_positions
-
-                        # Find which parameter position corresponds to this capability
-                        for dep_index, dep_name in enumerate(dependencies):
-                            if dep_name == capability and dep_index < len(
-                                mesh_positions
-                            ):
-                                param_position = mesh_positions[dep_index]
-
-                                # Check the parameter type at this position
-                                if param_position in parameter_types:
-                                    param_type = parameter_types[param_position]
-                                    if param_type == "McpAgent":
-                                        mcpagent_functions.append(func_id)
-                                    elif param_type == "McpMeshAgent":
-                                        mcpmeshagent_functions.append(func_id)
-
-            # Make deterministic decision based on complete analysis
-            if mcpagent_functions:
-                self.logger.debug(
-                    f"🔍 Found McpAgent in API route handlers {mcpagent_functions} for capability '{capability}' → using FullMCPProxy"
-                )
-                if mcpmeshagent_functions:
-                    self.logger.info(
-                        f"ℹ️ API capability '{capability}' used by both McpAgent {mcpagent_functions} and McpMeshAgent {mcpmeshagent_functions} → upgrading ALL to FullMCPProxy"
-                    )
-                return "FullMCPProxy"
-            else:
-                # Only McpMeshAgent or untyped parameters
-                self.logger.debug(
-                    f"🔍 Only McpMeshAgent/untyped API route handlers {mcpmeshagent_functions} for capability '{capability}' → using MCPClientProxy"
-                )
-                return "MCPClientProxy"
-
-        except Exception as e:
-            self.logger.warning(
-                f"⚠️ Failed to determine proxy type for API capability '{capability}': {e}"
-            )
-            return "MCPClientProxy"  # Safe default
+        # Note: This method always returns "MCPClientProxy" since McpAgent was removed.
+        # All McpMeshAgent parameters use MCPClientProxy.
+        self.logger.debug(
+            f"🔍 API route handlers for capability '{capability}' → using MCPClientProxy"
+        )
+        return "MCPClientProxy"
 
     def _create_proxy_for_api(
         self,
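The simplification means every McpMeshAgent-typed dependency now resolves to an MCPClientProxy, with no per-handler type scan. A minimal sketch of the calling contract, using the @mesh.tool form from this package's public examples (handler body illustrative; API route handlers follow the same parameter-injection pattern):

import mesh
from mesh.types import McpMeshAgent

@mesh.tool(capability="greeting", dependencies=["date_service"])
def greet(name: str, date_service: McpMeshAgent = None) -> str:
    # As of 0.6.0 the injected proxy is always an MCPClientProxy;
    # calling it forwards the request to the remote capability.
    today = date_service() if date_service else "unknown"
    return f"Hello {name}, today is {today}"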
--- a/_mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py
+++ b/_mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py
@@ -15,6 +15,7 @@ from ..shared.mesh_pipeline import MeshPipeline
 from .dependency_resolution import DependencyResolutionStep
 from .fast_heartbeat_check import FastHeartbeatStep
 from .heartbeat_send import HeartbeatSendStep
+from .llm_tools_resolution import LLMToolsResolutionStep
 from .registry_connection import RegistryConnectionStep
 
 logger = logging.getLogger(__name__)
@@ -24,13 +25,14 @@ class HeartbeatPipeline(MeshPipeline):
     """
     Specialized pipeline for heartbeat operations with fast optimization.
 
-    Executes the four core heartbeat steps in sequence:
+    Executes the five core heartbeat steps in sequence:
     1. Registry connection preparation
     2. Fast heartbeat check (HEAD request)
     3. Heartbeat sending (conditional POST request)
     4. Dependency resolution (conditional)
+    5. LLM tools resolution (conditional)
 
-    Steps 3 and 4 only run if fast heartbeat indicates changes are needed.
+    Steps 3, 4, and 5 only run if fast heartbeat indicates changes are needed.
     Provides optimization for NO_CHANGES and resilience for error conditions.
     """
 
@@ -45,6 +47,7 @@ class HeartbeatPipeline(MeshPipeline):
             FastHeartbeatStep(),
             HeartbeatSendStep(required=True),
             DependencyResolutionStep(),
+            LLMToolsResolutionStep(),
         ]
 
         self.add_steps(steps)
@@ -142,7 +145,7 @@ class HeartbeatPipeline(MeshPipeline):
         ]  # RegistryConnectionStep, FastHeartbeatStep
         conditional_steps = self.steps[
             2:
-        ]  # HeartbeatSendStep, DependencyResolutionStep
+        ]  # HeartbeatSendStep, DependencyResolutionStep, LLMToolsResolutionStep
 
         # Execute mandatory steps
         for step in mandatory_steps:
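The slice at index 2 is what gates the new step: anything after the fast HEAD check is conditional. A standalone sketch of that control flow (step names from this diff; the fast_heartbeat_changed context key is hypothetical):

async def run_heartbeat_cycle(pipeline) -> None:
    # Always run: RegistryConnectionStep, FastHeartbeatStep
    mandatory_steps = pipeline.steps[:2]
    # Run only when the fast HEAD check reports changes:
    # HeartbeatSendStep, DependencyResolutionStep, LLMToolsResolutionStep
    conditional_steps = pipeline.steps[2:]

    context: dict = {}
    for step in mandatory_steps:
        await step.execute(context)

    # "fast_heartbeat_changed" stands in for whatever signal
    # FastHeartbeatStep actually leaves in the shared context
    if context.get("fast_heartbeat_changed", True):
        for step in conditional_steps:
            await step.execute(context)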
--- /dev/null
+++ b/_mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py
@@ -0,0 +1,222 @@
+"""
+LLM tools resolution step for MCP Mesh pipeline.
+
+Handles processing llm_tools from registry response and updating
+the LLM agent injection system.
+"""
+
+import json
+import logging
+from typing import Any
+
+from ...engine.dependency_injector import get_global_injector
+from ..shared import PipelineResult, PipelineStatus, PipelineStep
+
+logger = logging.getLogger(__name__)
+
+# Global state for LLM tools hash tracking across heartbeat cycles
+_last_llm_tools_hash = None
+
+
+class LLMToolsResolutionStep(PipelineStep):
+    """
+    Processes LLM tools from registry response.
+
+    Takes the llm_tools data from the heartbeat response and updates
+    the LLM agent injection system. This enables LLM agents to receive
+    auto-filtered, up-to-date tool lists based on their llm_filter configuration.
+
+    The registry applies filtering logic and returns matching tools with
+    full schemas that can be used by LLM agents.
+    """
+
+    def __init__(self):
+        super().__init__(
+            name="llm-tools-resolution",
+            required=False,  # Optional - only needed for LLM agents
+            description="Process LLM tools resolution from registry",
+        )
+
+    async def execute(self, context: dict[str, Any]) -> PipelineResult:
+        """Process LLM tools resolution with hash-based change detection."""
+        self.logger.debug("Processing LLM tools resolution...")
+
+        result = PipelineResult(message="LLM tools resolution processed")
+
+        try:
+            # Get heartbeat response
+            heartbeat_response = context.get("heartbeat_response")
+
+            if heartbeat_response is None:
+                result.status = PipelineStatus.SUCCESS
+                result.message = "No heartbeat response - completed successfully"
+                self.logger.debug("ℹ️ No heartbeat response to process - this is normal")
+                return result
+
+            # Use hash-based change detection and processing logic
+            await self.process_llm_tools_from_heartbeat(heartbeat_response)
+
+            # Extract LLM tools count for context
+            llm_tools = heartbeat_response.get("llm_tools", {})
+            function_count = len(llm_tools)
+            tool_count = sum(
+                len(tools) if isinstance(tools, list) else 0
+                for tools in llm_tools.values()
+            )
+
+            # Store processed LLM tools info for context
+            result.add_context("llm_function_count", function_count)
+            result.add_context("llm_tool_count", tool_count)
+            result.add_context("llm_tools", llm_tools)
+
+            result.message = "LLM tools resolution completed (efficient hash-based)"
+
+            if function_count > 0:
+                self.logger.info(
+                    f"🤖 LLM tools resolved: {function_count} functions, {tool_count} tools"
+                )
+
+            self.logger.debug(
+                "🤖 LLM tools resolution step completed using hash-based change detection"
+            )
+
+        except Exception as e:
+            result.status = PipelineStatus.FAILED
+            result.message = f"LLM tools resolution failed: {e}"
+            result.add_error(str(e))
+            self.logger.error(f"❌ LLM tools resolution failed: {e}")
+
+        return result
+
+    def _extract_llm_tools_state(
+        self, heartbeat_response: dict[str, Any]
+    ) -> dict[str, list[dict[str, Any]]]:
+        """Extract LLM tools state structure from heartbeat response.
+
+        Preserves array structure and order from registry.
+
+        Returns:
+            {function_id: [{function_name, capability, endpoint, input_schema, ...}, ...]}
+        """
+        llm_tools = heartbeat_response.get("llm_tools", {})
+
+        if not isinstance(llm_tools, dict):
+            self.logger.warning(f"llm_tools is not a dict, type={type(llm_tools)}")
+            return {}
+
+        # Return as-is since registry already provides the correct structure
+        # Filter out non-dict or non-list values for safety
+        state = {}
+        for function_id, tools in llm_tools.items():
+            if isinstance(tools, list):
+                state[function_id] = tools
+
+        return state
+
+    def _hash_llm_tools_state(self, state: dict) -> str:
+        """Create hash of LLM tools state structure."""
+        import hashlib
+
+        # Convert to sorted JSON string for consistent hashing
+        state_json = json.dumps(state, sort_keys=True)
+
+        hash_value = hashlib.sha256(state_json.encode()).hexdigest()[:16]
+
+        return hash_value
+
+    async def process_llm_tools_from_heartbeat(
+        self, heartbeat_response: dict[str, Any]
+    ) -> None:
+        """Process heartbeat response to update LLM agent injection.
+
+        Uses hash-based comparison to efficiently detect when ANY LLM tools change
+        and then updates ALL affected LLM agents in one operation.
+
+        Resilience logic:
+        - No response (connection error, 5xx) → Skip entirely (keep existing tools)
+        - 2xx response with empty llm_tools → Clear all LLM tools
+        - 2xx response with partial llm_tools → Update to match registry exactly
+        """
+        try:
+            if not heartbeat_response:
+                # No response from registry (connection error, timeout, 5xx)
+                # → Skip entirely for resilience (keep existing LLM tools)
+                self.logger.debug(
+                    "No heartbeat response - skipping LLM tools processing for resilience"
+                )
+                return
+
+            # Extract current LLM tools state
+            current_state = self._extract_llm_tools_state(heartbeat_response)
+
+            # IMPORTANT: Empty state from successful response means "no LLM tools"
+            # This is different from "no response" which means "keep existing for resilience"
+
+            # Hash the current state (including empty state)
+            current_hash = self._hash_llm_tools_state(current_state)
+
+            # Compare with previous state (use global variable)
+            global _last_llm_tools_hash
+            if current_hash == _last_llm_tools_hash:
+                self.logger.debug(
+                    f"🔄 LLM tools state unchanged (hash: {current_hash}), skipping processing"
+                )
+                return
+
+            # State changed - determine what changed
+            function_count = len(current_state)
+            total_tools = sum(len(tools) for tools in current_state.values())
+
+            if _last_llm_tools_hash is None:
+                if function_count > 0:
+                    self.logger.info(
+                        f"🤖 Initial LLM tools state detected: {function_count} functions, {total_tools} tools"
+                    )
+                else:
+                    self.logger.info(
+                        "🤖 Initial LLM tools state detected: no LLM tools"
+                    )
+            else:
+                self.logger.info(
+                    f"🤖 LLM tools state changed (hash: {_last_llm_tools_hash} → {current_hash})"
+                )
+                if function_count > 0:
+                    self.logger.info(
+                        f"🤖 Updating LLM tools for {function_count} functions ({total_tools} total tools)"
+                    )
+                else:
+                    self.logger.info(
+                        "🤖 Registry reports no LLM tools - clearing all existing LLM tools"
+                    )
+
+            injector = get_global_injector()
+
+            # Determine if this is initial processing or an update
+            if _last_llm_tools_hash is None:
+                # Initial processing - use process_llm_tools
+                self.logger.debug(
+                    "🤖 Initial LLM tools processing - calling process_llm_tools()"
+                )
+                injector.process_llm_tools(current_state)
+            else:
+                # Update - use update_llm_tools
+                self.logger.debug("🤖 LLM tools update - calling update_llm_tools()")
+                injector.update_llm_tools(current_state)
+
+            # Store new hash for next comparison (use global variable)
+            _last_llm_tools_hash = current_hash
+
+            if function_count > 0:
+                self.logger.info(
+                    f"✅ Successfully processed LLM tools for {function_count} functions ({total_tools} tools, state hash: {current_hash})"
+                )
+            else:
+                self.logger.info(
+                    f"✅ LLM tools state synchronized (no tools, state hash: {current_hash})"
+                )
+
+        except Exception as e:
+            self.logger.error(
+                f"❌ Failed to process LLM tools from heartbeat: {e}", exc_info=True
+            )
+            # Don't raise - this should not break the heartbeat loop
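The change detection reduces to hashing a canonical JSON encoding of the tools state. A self-contained illustration of the properties this relies on, using the same recipe as _hash_llm_tools_state:

import hashlib
import json

def state_hash(state: dict) -> str:
    # Sorted-key JSON, first 16 hex chars of the SHA-256 digest
    return hashlib.sha256(
        json.dumps(state, sort_keys=True).encode()
    ).hexdigest()[:16]

a = {"fn": [{"capability": "date_service", "function_name": "get_date"}]}
b = {"fn": [{"function_name": "get_date", "capability": "date_service"}]}

assert state_hash(a) == state_hash(b)   # dict key order is normalized away
assert state_hash(a) != state_hash({})  # an empty state hashes differently,
                                        # so "registry cleared tools" is detected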
--- a/_mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py
+++ b/_mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py
@@ -57,6 +57,13 @@ class FastMCPServerDiscoveryStep(PipelineStep):
         result.add_context("fastmcp_server_count", len(discovered_servers))
         result.add_context("fastmcp_total_functions", total_registered_functions)
 
+        # Store server info in DecoratorRegistry for heartbeat schema extraction (Phase 2)
+        from ...engine.decorator_registry import DecoratorRegistry
+
+        # Convert server_info list to dict for easier lookup
+        server_info_dict = {info["server_name"]: info for info in server_info}
+        DecoratorRegistry.store_fastmcp_server_info(server_info_dict)
+
         result.message = (
             f"Discovered {len(discovered_servers)} FastMCP servers "
             f"with {total_registered_functions} total functions"
--- a/_mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py
+++ b/_mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py
@@ -9,6 +9,7 @@ from ...engine.decorator_registry import DecoratorRegistry
 from ...engine.signature_analyzer import validate_mesh_dependencies
 from ...shared.config_resolver import ValidationRule, get_config_value
 from ...shared.support_types import HealthStatus, HealthStatusType
+from ...utils.fastmcp_schema_extractor import FastMCPSchemaExtractor
 from ..shared import PipelineResult, PipelineStatus, PipelineStep
 
 
@@ -39,8 +40,17 @@ class HeartbeatPreparationStep(PipelineStep):
         agent_config = DecoratorRegistry.get_resolved_agent_config()
         agent_id = agent_config["agent_id"]
 
-        # Build tools list for registration
-        tools_list = self._build_tools_list(mesh_tools)
+        # Get FastMCP server info from context (set by fastmcp-server-discovery step)
+        fastmcp_server_info = context.get("fastmcp_server_info", [])
+
+        # Convert server_info list to dict for schema extractor
+        fastmcp_servers = {}
+        for server_info in fastmcp_server_info:
+            server_name = server_info.get("server_name", "unknown")
+            fastmcp_servers[server_name] = server_info
+
+        # Build tools list for registration (with FastMCP schemas)
+        tools_list = self._build_tools_list(mesh_tools, fastmcp_servers)
 
         # Build agent registration payload
         registration_data = self._build_registration_payload(
@@ -71,8 +81,10 @@
 
         return result
 
-    def _build_tools_list(self, mesh_tools: dict[str, Any]) -> list[dict[str, Any]]:
-        """Build tools list from mesh_tools, validating function signatures."""
+    def _build_tools_list(
+        self, mesh_tools: dict[str, Any], fastmcp_servers: dict[str, Any] = None
+    ) -> list[dict[str, Any]]:
+        """Build tools list from mesh_tools, validating function signatures and extracting schemas."""
         tools_list = []
         skipped_tools = []
 
@@ -93,6 +105,53 @@
                 skipped_tools.append(func_name)
                 continue
 
+            # Extract inputSchema from FastMCP tool (if available)
+            # First try matching with FastMCP servers, then fallback to direct attribute
+            input_schema = FastMCPSchemaExtractor.extract_from_fastmcp_servers(
+                current_function, fastmcp_servers
+            )
+            if input_schema is None:
+                input_schema = FastMCPSchemaExtractor.extract_input_schema(
+                    current_function
+                )
+
+            # Check if this function has @mesh.llm decorator (Phase 3)
+            llm_filter_data = None
+            llm_agents = DecoratorRegistry.get_mesh_llm_agents()
+            self.logger.info(
+                f"🤖 Checking for LLM filter: function={func_name}, total_llm_agents_registered={len(llm_agents)}"
+            )
+
+            for llm_agent_id, llm_metadata in llm_agents.items():
+                if llm_metadata.function.__name__ == func_name:
+                    # Found matching LLM agent - extract filter config
+                    raw_filter = llm_metadata.config.get("filter")
+                    filter_mode = llm_metadata.config.get("filter_mode", "all")
+
+                    # Normalize filter to array format (OpenAPI schema requirement)
+                    if raw_filter is None:
+                        normalized_filter = []
+                    elif isinstance(raw_filter, str):
+                        normalized_filter = [raw_filter]
+                    elif isinstance(raw_filter, dict):
+                        normalized_filter = [raw_filter]
+                    elif isinstance(raw_filter, list):
+                        normalized_filter = raw_filter
+                    else:
+                        self.logger.warning(
+                            f"⚠️ Invalid filter type for {func_name}: {type(raw_filter)}"
+                        )
+                        normalized_filter = []
+
+                    llm_filter_data = {
+                        "filter": normalized_filter,
+                        "filter_mode": filter_mode,
+                    }
+                    self.logger.info(
+                        f"🤖 LLM filter found for {func_name}: {len(normalized_filter)} filters, mode={filter_mode}, raw_filter={raw_filter}"
+                    )
+                    break
+
             # Build tool registration data
             tool_data = {
                 "function_name": func_name,
@@ -101,6 +160,8 @@
                 "version": metadata.get("version", "1.0.0"),
                 "description": metadata.get("description"),
                 "dependencies": self._process_dependencies(dependencies),
+                "input_schema": input_schema,  # Add inputSchema for LLM integration (Phase 2)
+                "llm_filter": llm_filter_data,  # Add LLM filter for LLM integration (Phase 3)
             }
 
             # Add debug pointer information only if debug flag is enabled
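The branch ladder above accepts several author-side spellings of filter on @mesh.llm and always ships a list to the registry. A standalone sketch of the same normalization with its expected outputs:

def normalize_filter(raw_filter):
    """Mirrors the branch ladder in _build_tools_list above."""
    if raw_filter is None:
        return []
    if isinstance(raw_filter, (str, dict)):
        return [raw_filter]  # single capability name or single dict filter
    if isinstance(raw_filter, list):
        return raw_filter  # already in wire format
    return []  # invalid type; the real code also logs a warning

assert normalize_filter(None) == []
assert normalize_filter("date_service") == ["date_service"]
assert normalize_filter({"capability": "date_service"}) == [
    {"capability": "date_service"}
]
assert normalize_filter(42) == []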
--- a/_mcp_mesh/pipeline/mcp_startup/startup_pipeline.py
+++ b/_mcp_mesh/pipeline/mcp_startup/startup_pipeline.py
@@ -49,9 +49,9 @@ class StartupPipeline(MeshPipeline):
         steps = [
             DecoratorCollectionStep(),
             ConfigurationStep(),
-            HeartbeatPreparationStep(),  # Prepare heartbeat payload structure
+            FastMCPServerDiscoveryStep(),  # Discover user's FastMCP instances (MOVED UP for Phase 2)
+            HeartbeatPreparationStep(),  # Prepare heartbeat payload structure (can now access FastMCP schemas)
             ServerDiscoveryStep(),  # Discover existing uvicorn servers from immediate startup
-            FastMCPServerDiscoveryStep(),  # Discover user's FastMCP instances
             HeartbeatLoopStep(),  # Setup background heartbeat config (handles no registry gracefully)
             FastAPIServerSetupStep(),  # Setup FastAPI app with background heartbeat
             # Note: Registry connection is handled in heartbeat pipeline for retry behavior
--- a/_mcp_mesh/shared/registry_client_wrapper.py
+++ b/_mcp_mesh/shared/registry_client_wrapper.py
@@ -7,7 +7,7 @@ that handles conversion between simple Python dicts and Pydantic models.
 
 import logging
 from datetime import UTC, datetime
-from typing import Any, Dict, Optional
+from typing import Any, Optional
 
 from _mcp_mesh.generated.mcp_mesh_registry_client.api.agents_api import AgentsApi
 from _mcp_mesh.generated.mcp_mesh_registry_client.api_client import ApiClient
@@ -72,7 +72,7 @@ class RegistryClientWrapper:
         )
 
         registration_json = json.dumps(registration_dict, indent=2, default=str)
-        self.logger.debug(
+        self.logger.info(
             f"🔍 Full heartbeat registration payload:\n{registration_json}"
         )
 
@@ -80,7 +80,9 @@
             response = self.agents_api.send_heartbeat(agent_registration)
 
             # Convert response to dict
-            return self._response_to_dict(response)
+            response_dict = self._response_to_dict(response)
+
+            return response_dict
 
         except Exception as e:
             self.logger.error(
@@ -332,6 +334,7 @@
 
         # Import here to avoid circular imports
        from _mcp_mesh.engine.decorator_registry import DecoratorRegistry
+        from _mcp_mesh.utils.fastmcp_schema_extractor import FastMCPSchemaExtractor
 
         # Get current tools from registry
         mesh_tools = DecoratorRegistry.get_mesh_tools()
@@ -374,7 +377,58 @@
                 k: v for k, v in metadata.items() if k not in standard_fields
             }
 
-            # Create tool registration with kwargs support
+            # Extract inputSchema from FastMCP tool (Phase 2: Schema Collection)
+            # First try to get FastMCP server info from DecoratorRegistry
+            fastmcp_servers = DecoratorRegistry.get_fastmcp_server_info()
+            input_schema = None
+
+            if fastmcp_servers:
+                # Try comprehensive extraction using server context
+                input_schema = FastMCPSchemaExtractor.extract_from_fastmcp_servers(
+                    decorated_func.function, fastmcp_servers
+                )
+
+            # Fallback to direct attribute check if server lookup didn't work
+            if input_schema is None:
+                input_schema = FastMCPSchemaExtractor.extract_input_schema(
+                    decorated_func.function
+                )
+
+            # Extract llm_filter from @mesh.llm decorator (Phase 3: LLM Integration)
+            llm_agents = DecoratorRegistry.get_mesh_llm_agents()
+            llm_filter_data = None
+
+            for llm_agent_id, llm_metadata in llm_agents.items():
+                # Match by function name (decorated_func.function is the wrapper, need to check original)
+                if llm_metadata.function.__name__ == func_name:
+                    # Found matching LLM agent - extract filter config
+                    raw_filter = llm_metadata.config.get("filter")
+                    filter_mode = llm_metadata.config.get("filter_mode", "all")
+
+                    # Normalize filter to array format
+                    if raw_filter is None:
+                        normalized_filter = []
+                    elif isinstance(raw_filter, list):
+                        normalized_filter = raw_filter
+                    elif isinstance(raw_filter, dict):
+                        # Single dict filter like {'capability': 'date_service'}
+                        normalized_filter = [raw_filter]
+                    elif isinstance(raw_filter, str):
+                        normalized_filter = [raw_filter] if raw_filter else []
+                    else:
+                        normalized_filter = []
+
+                    llm_filter_data = {
+                        "filter": normalized_filter,
+                        "filter_mode": filter_mode,
+                    }
+
+                    self.logger.debug(
+                        f"🤖 Extracted llm_filter for {func_name}: {len(normalized_filter)} filters, mode={filter_mode}"
+                    )
+                    break
+
+            # Create tool registration with llm_filter as separate top-level field (not in kwargs)
             tool_reg = MeshToolRegistration(
                 function_name=func_name,
                 capability=metadata.get("capability"),
@@ -382,6 +436,8 @@
                 version=metadata.get("version", "1.0.0"),
                 dependencies=dep_registrations,
                 description=metadata.get("description"),
+                llm_filter=llm_filter_data,  # Pass llm_filter as top-level parameter
+                input_schema=input_schema,  # Pass inputSchema as top-level parameter (not in kwargs)
                 kwargs=kwargs_data if kwargs_data else None,
             )
             tools.append(tool_reg)
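Taken together, each registered tool now carries its JSON schema and optional LLM filter as top-level fields. An illustrative payload entry (values invented; field names from the MeshToolRegistration usage above):

tool_registration = {
    "function_name": "summarize",
    "capability": "summarizer",
    "version": "1.0.0",
    "description": "Summarize a document",
    "dependencies": [],
    # Phase 2: inputSchema extracted from the FastMCP tool definition
    "input_schema": {
        "type": "object",
        "properties": {"text": {"type": "string"}},
        "required": ["text"],
    },
    # Phase 3: normalized filter from @mesh.llm (None when the function
    # has no @mesh.llm decorator)
    "llm_filter": {"filter": [{"capability": "date_service"}], "filter_mode": "all"},
}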