mcp-mesh 0.5.7__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/base_injector.py +171 -0
  3. _mcp_mesh/engine/decorator_registry.py +136 -33
  4. _mcp_mesh/engine/dependency_injector.py +91 -18
  5. _mcp_mesh/engine/http_wrapper.py +5 -22
  6. _mcp_mesh/engine/llm_config.py +41 -0
  7. _mcp_mesh/engine/llm_errors.py +115 -0
  8. _mcp_mesh/engine/mesh_llm_agent.py +440 -0
  9. _mcp_mesh/engine/mesh_llm_agent_injector.py +487 -0
  10. _mcp_mesh/engine/response_parser.py +240 -0
  11. _mcp_mesh/engine/signature_analyzer.py +229 -99
  12. _mcp_mesh/engine/tool_executor.py +169 -0
  13. _mcp_mesh/engine/tool_schema_builder.py +125 -0
  14. _mcp_mesh/engine/unified_mcp_proxy.py +14 -12
  15. _mcp_mesh/generated/.openapi-generator/FILES +4 -0
  16. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +81 -44
  17. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +72 -35
  18. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +132 -0
  19. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +172 -0
  20. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +92 -0
  21. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +121 -0
  22. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +98 -51
  23. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +93 -44
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +84 -41
  25. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +9 -72
  26. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +6 -3
  27. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +222 -0
  28. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +7 -0
  29. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +65 -4
  30. _mcp_mesh/pipeline/mcp_startup/startup_pipeline.py +2 -2
  31. _mcp_mesh/shared/registry_client_wrapper.py +60 -4
  32. _mcp_mesh/utils/fastmcp_schema_extractor.py +476 -0
  33. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/METADATA +1 -1
  34. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/RECORD +39 -25
  35. mesh/__init__.py +8 -4
  36. mesh/decorators.py +344 -2
  37. mesh/types.py +145 -94
  38. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/WHEEL +0 -0
  39. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/licenses/LICENSE +0 -0
mesh/decorators.py CHANGED
@@ -7,6 +7,7 @@ Provides @mesh.tool and @mesh.agent decorators with clean separation of concerns
 import logging
 import uuid
 from collections.abc import Callable
+from functools import wraps
 from typing import Any, TypeVar
 
 # Import from _mcp_mesh for registry and runtime integration
@@ -758,8 +759,8 @@ def route(
         async def upload_resume(
             request: Request,
             file: UploadFile = File(...),
-            pdf_agent: McpAgent = None,  # Injected by MCP Mesh
-            user_service: McpAgent = None  # Injected by MCP Mesh
+            pdf_agent: mesh.McpMeshAgent = None,  # Injected by MCP Mesh
+            user_service: mesh.McpMeshAgent = None  # Injected by MCP Mesh
         ):
             result = await pdf_agent.extract_text_from_pdf(file)
             await user_service.update_profile(user_data, result)
@@ -929,3 +930,344 @@ def set_shutdown_context(context: dict[str, Any]):
     """Set context for graceful shutdown (called from pipeline)."""
     # Delegate to the shared graceful shutdown manager
     set_global_shutdown_context(context)
+
+
+def llm(
+    filter: dict[str, Any] | list[dict[str, Any] | str] | str | None = None,
+    *,
+    filter_mode: str = "all",
+    provider: str = "claude",
+    model: str | None = None,
+    api_key: str | None = None,
+    max_iterations: int = 10,
+    system_prompt: str | None = None,
+    system_prompt_file: str | None = None,
+    context_param: str | None = None,
+    **kwargs: Any,
+) -> Callable[[T], T]:
+    """
+    LLM agent decorator with automatic agentic loop.
+
+    This decorator enables LLM agents to automatically access mesh tools via
+    dependency injection. The MeshLlmAgent proxy handles the complete agentic loop:
+    - Tool filtering based on filter parameter
+    - LLM API calls (Claude, OpenAI, etc. via LiteLLM)
+    - Tool execution via MCP proxies
+    - Response parsing to Pydantic models
+
+    Configuration Hierarchy (ENV > Decorator):
+    - MESH_LLM_PROVIDER: Override provider
+    - MESH_LLM_MODEL: Override model
+    - ANTHROPIC_API_KEY: Claude API key
+    - OPENAI_API_KEY: OpenAI API key
+    - MESH_LLM_MAX_ITERATIONS: Override max iterations
+
+    Usage:
+        from pydantic import BaseModel
+        import mesh
+
+        class ChatResponse(BaseModel):
+            answer: str
+            confidence: float
+
+        @mesh.llm(
+            filter={"capability": "document", "tags": ["pdf"]},
+            provider="claude",
+            model="claude-3-5-sonnet-20241022"
+        )
+        @mesh.tool(capability="chat")
+        def chat(message: str, llm: mesh.MeshLlmAgent = None) -> ChatResponse:
+            llm.set_system_prompt("You are a helpful assistant.")
+            return llm(message)
+
+    Args:
+        filter: Tool filter (string, dict, or list of mixed)
+        filter_mode: Filter mode ("all", "best_match", "*")
+        provider: LLM provider ("claude", "openai", "custom")
+        model: Model name (can be overridden by MESH_LLM_MODEL)
+        api_key: API key (can be overridden by provider-specific env vars)
+        max_iterations: Max agentic loop iterations (can be overridden by MESH_LLM_MAX_ITERATIONS)
+        system_prompt: Default system prompt
+        system_prompt_file: Path to Jinja2 template file
+        **kwargs: Additional configuration
+
+    Returns:
+        Decorated function with MeshLlmAgent injection
+
+    Raises:
+        ValueError: If no MeshLlmAgent parameter found
+        UserWarning: If multiple MeshLlmAgent parameters or non-Pydantic return type
+    """
+    import inspect
+    import warnings
+
+    def decorator(func: T) -> T:
+        # Step 1: Resolve configuration with hierarchy (ENV > decorator params)
+        # Phase 1: Detect file:// prefix for template files
+        is_template = False
+        template_path = None
+
+        if system_prompt:
+            # Check for file:// prefix
+            if system_prompt.startswith("file://"):
+                is_template = True
+                template_path = system_prompt[7:]  # Strip "file://" prefix
+            # Auto-detect .jinja2 or .j2 extension without file:// prefix
+            elif system_prompt.endswith(".jinja2") or system_prompt.endswith(".j2"):
+                is_template = True
+                template_path = system_prompt
+
+        # Backward compatibility: system_prompt_file (deprecated)
+        if system_prompt_file:
+            logger.warning(
+                f"⚠️ @mesh.llm: 'system_prompt_file' parameter is deprecated. "
+                f"Use 'system_prompt=\"file://{system_prompt_file}\"' instead."
+            )
+            if not is_template:  # Only use if system_prompt didn't specify a template
+                is_template = True
+                template_path = system_prompt_file
+
+        # Validate context_param usage
+        if context_param and not is_template:
+            logger.warning(
+                f"⚠️ @mesh.llm: 'context_param' specified for function '{func.__name__}' "
+                f"but system_prompt is not a template (no file:// prefix or .jinja2/.j2 extension). "
+                f"Context parameter will be ignored."
+            )
+
+        resolved_config = {
+            "filter": filter,
+            "filter_mode": get_config_value(
+                "MESH_LLM_FILTER_MODE",
+                override=filter_mode,
+                default="all",
+                rule=ValidationRule.STRING_RULE,
+            ),
+            "provider": get_config_value(
+                "MESH_LLM_PROVIDER",
+                override=provider,
+                default="claude",
+                rule=ValidationRule.STRING_RULE,
+            ),
+            "model": get_config_value(
+                "MESH_LLM_MODEL",
+                override=model,
+                default=None,
+                rule=ValidationRule.STRING_RULE,
+            ),
+            "api_key": api_key,  # Will be resolved from provider-specific env vars later
+            "max_iterations": get_config_value(
+                "MESH_LLM_MAX_ITERATIONS",
+                override=max_iterations,
+                default=10,
+                rule=ValidationRule.NONZERO_RULE,
+            ),
+            "system_prompt": system_prompt,
+            "system_prompt_file": system_prompt_file,
+            # Phase 1: Template metadata
+            "is_template": is_template,
+            "template_path": template_path,
+            "context_param": context_param,
+        }
+        resolved_config.update(kwargs)
+
+        # Step 2: Extract output type from return annotation
+        sig = inspect.signature(func)
+        return_annotation = sig.return_annotation
+
+        output_type = None
+        if return_annotation and return_annotation != inspect.Signature.empty:
+            output_type = return_annotation
+
+        # Warn if not a Pydantic model
+        try:
+            from pydantic import BaseModel
+
+            if not (
+                inspect.isclass(output_type) and issubclass(output_type, BaseModel)
+            ):
+                warnings.warn(
+                    f"Function '{func.__name__}' decorated with @mesh.llm should return a Pydantic BaseModel subclass, "
+                    f"got {output_type}. This may cause validation errors at runtime.",
+                    UserWarning,
+                    stacklevel=2,
+                )
+        except ImportError:
+            pass  # Pydantic not available, skip validation
+
+        # Step 3: Find MeshLlmAgent parameter
+        from mesh.types import MeshLlmAgent
+
+        llm_params = []
+        for param_name, param in sig.parameters.items():
+            if param.annotation == MeshLlmAgent or (
+                hasattr(param.annotation, "__origin__")
+                and param.annotation.__origin__ == MeshLlmAgent
+            ):
+                llm_params.append(param_name)
+
+        if not llm_params:
+            raise ValueError(
+                f"Function '{func.__name__}' decorated with @mesh.llm must have at least one parameter "
+                f"of type 'mesh.MeshLlmAgent'. Example: def {func.__name__}(..., llm: mesh.MeshLlmAgent = None)"
+            )
+
+        if len(llm_params) > 1:
+            warnings.warn(
+                f"Function '{func.__name__}' has multiple MeshLlmAgent parameters: {llm_params}. "
+                f"Only the first parameter '{llm_params[0]}' will be injected. "
+                f"Additional parameters will be ignored.",
+                UserWarning,
+                stacklevel=2,
+            )
+
+        param_name = llm_params[0]
+
+        # Step 4: Generate unique function ID
+        function_id = f"{func.__name__}_{uuid.uuid4().hex[:8]}"
+
+        # Step 5: Register with DecoratorRegistry
+        DecoratorRegistry.register_mesh_llm(
+            func=func,
+            config=resolved_config,
+            output_type=output_type,
+            param_name=param_name,
+            function_id=function_id,
+        )
+
+        logger.debug(
+            f"@mesh.llm registered: {func.__name__} "
+            f"(provider={resolved_config['provider']}, param={param_name}, filter={filter})"
+        )
+
+        # Step 6: Enhance existing wrapper from @mesh.tool (if present)
+        # or create new wrapper
+        #
+        # This approach:
+        # - Reuses the wrapper created by @mesh.tool (if present)
+        # - Avoids creating multiple wrapper layers
+        # - Ensures FastMCP caches the SAME wrapper instance we update later
+        # - Combines both DI injection and LLM injection in the same wrapper
+
+        # Check if there's an existing wrapper from @mesh.tool
+        mesh_tools = DecoratorRegistry.get_mesh_tools()
+        existing_wrapper = None
+
+        if func.__name__ in mesh_tools:
+            existing_wrapper = mesh_tools[func.__name__].function
+            logger.info(
+                f"🔗 Found existing @mesh.tool wrapper for '{func.__name__}' at {hex(id(existing_wrapper))} - enhancing it"
+            )
+
+        # Trigger debounced processing
+        _trigger_debounced_processing()
+
+        if existing_wrapper:
+            # ENHANCE the existing wrapper with LLM attributes
+            logger.info(
+                f"✨ Enhancing existing wrapper with LLM injection for '{func.__name__}'"
+            )
+
+            # Store the original wrapped function if not already stored
+            if not hasattr(existing_wrapper, "__wrapped__"):
+                existing_wrapper.__wrapped__ = func
+
+            # Store the original call behavior to preserve DI injection
+            original_call = existing_wrapper
+
+            # Create enhanced wrapper that does BOTH DI injection and LLM injection
+            @wraps(func)
+            def combined_injection_wrapper(*args, **kwargs):
+                """Wrapper that injects both MeshLlmAgent and DI parameters."""
+                # Inject LLM parameter if not provided or if it's None
+                if param_name not in kwargs or kwargs.get(param_name) is None:
+                    kwargs[param_name] = combined_injection_wrapper._mesh_llm_agent
+                # Then call the original wrapper (which handles DI injection)
+                return original_call(*args, **kwargs)
+
+            # Add LLM metadata attributes to combined wrapper
+            combined_injection_wrapper._mesh_llm_agent = (
+                None  # Will be updated during heartbeat
+            )
+            combined_injection_wrapper._mesh_llm_param_name = param_name
+            combined_injection_wrapper._mesh_llm_function_id = function_id
+            combined_injection_wrapper._mesh_llm_config = resolved_config
+            combined_injection_wrapper._mesh_llm_output_type = output_type
+            combined_injection_wrapper.__wrapped__ = func
+
+            # Create update method for heartbeat that updates the COMBINED wrapper
+            def update_llm_agent(agent):
+                combined_injection_wrapper._mesh_llm_agent = agent
+                logger.info(
+                    f"🔄 Updated MeshLlmAgent on combined wrapper for {func.__name__} (function_id={function_id})"
+                )
+
+            combined_injection_wrapper._mesh_update_llm_agent = update_llm_agent
+
+            # Copy any other mesh attributes from existing wrapper
+            for attr in dir(existing_wrapper):
+                if attr.startswith("_mesh_") and not hasattr(
+                    combined_injection_wrapper, attr
+                ):
+                    try:
+                        setattr(
+                            combined_injection_wrapper,
+                            attr,
+                            getattr(existing_wrapper, attr),
+                        )
+                    except AttributeError:
+                        pass  # Some attributes might not be settable
+
+            # Update DecoratorRegistry with the combined wrapper
+            DecoratorRegistry.update_mesh_llm_function(
+                function_id, combined_injection_wrapper
+            )
+            DecoratorRegistry.update_mesh_tool_function(
+                func.__name__, combined_injection_wrapper
+            )
+
+            logger.info(
+                f"✅ Enhanced wrapper for '{func.__name__}' with combined DI + LLM injection at {hex(id(combined_injection_wrapper))}"
+            )
+
+            # Return the enhanced wrapper
+            return combined_injection_wrapper
+
+        else:
+            # FALLBACK: Create new wrapper if no existing @mesh.tool wrapper found
+            logger.info(
+                f"📝 No existing wrapper found for '{func.__name__}' - creating new LLM wrapper"
+            )
+
+            @wraps(func)
+            def llm_injection_wrapper(*args, **kwargs):
+                """Wrapper that injects MeshLlmAgent parameter."""
+                # Inject llm parameter if not provided or if it's None
+                if param_name not in kwargs or kwargs.get(param_name) is None:
+                    kwargs[param_name] = llm_injection_wrapper._mesh_llm_agent
+                return func(*args, **kwargs)
+
+            # Create update method for heartbeat - updates the wrapper, not func
+            def update_llm_agent(agent):
+                llm_injection_wrapper._mesh_llm_agent = agent
+                logger.info(
+                    f"🔄 Updated MeshLlmAgent for {func.__name__} (function_id={function_id})"
+                )
+
+            # Copy all metadata attributes to the wrapper
+            llm_injection_wrapper._mesh_llm_agent = None
+            llm_injection_wrapper._mesh_llm_param_name = param_name
+            llm_injection_wrapper._mesh_llm_function_id = function_id
+            llm_injection_wrapper._mesh_llm_config = resolved_config
+            llm_injection_wrapper._mesh_llm_output_type = output_type
+            llm_injection_wrapper._mesh_update_llm_agent = update_llm_agent
+
+            # Update DecoratorRegistry with the wrapper
+            DecoratorRegistry.update_mesh_llm_function(
+                function_id, llm_injection_wrapper
+            )
+
+            # Return the new wrapper
+            return llm_injection_wrapper
+
+    return decorator
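
For quick reference, the pattern the new decorator expects (per its docstring above) is sketched below. This is a minimal, hedged sketch, assuming a running mesh registry, provider credentials such as ANTHROPIC_API_KEY in the environment, and mesh tool agents advertising the filtered capability; none of that appears in this diff, and environment variables like MESH_LLM_MODEL or MESH_LLM_MAX_ITERATIONS would override the decorator arguments.

    # Minimal usage sketch of @mesh.llm, based on the docstring above.
    # Assumes a running mesh registry, an ANTHROPIC_API_KEY, and "document"
    # tools registered elsewhere in the mesh; values here are illustrative.
    from pydantic import BaseModel

    import mesh


    class ChatResponse(BaseModel):
        answer: str
        confidence: float


    @mesh.llm(
        filter={"capability": "document", "tags": ["pdf"]},  # which mesh tools the LLM may call
        provider="claude",
        model="claude-3-5-sonnet-20241022",  # MESH_LLM_MODEL env var would take precedence
        max_iterations=10,  # MESH_LLM_MAX_ITERATIONS env var would take precedence
    )
    @mesh.tool(capability="chat")
    def chat(message: str, llm: mesh.MeshLlmAgent = None) -> ChatResponse:
        # `llm` is injected by the mesh runtime during heartbeat; callers never pass it.
        llm.set_system_prompt("You are a helpful document assistant.")
        return llm(message)  # runs the agentic loop and returns a ChatResponse
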
mesh/types.py CHANGED
@@ -180,113 +180,95 @@ class McpMeshAgent(Protocol):
         }
 
 
-class McpAgent(Protocol):
+class MeshLlmAgent(Protocol):
     """
-    DEPRECATED: Use McpMeshAgent instead.
-
-    This type has been unified with McpMeshAgent. All features previously exclusive
-    to McpAgent are now available in McpMeshAgent using FastMCP's superior client.
-
-    Migration:
-        # Old way (deprecated)
-        def process_files(file_service: McpAgent) -> str:
-            pass
-
-        # New way (recommended)
-        def process_files(file_service: McpMeshAgent) -> str:
-            pass
-
-    McpMeshAgent now provides all MCP protocol features including streaming,
-    session management, and CallToolResult objects via FastMCP client.
+    LLM agent proxy with automatic agentic loop.
+
+    This protocol defines the interface for LLM agents that are automatically injected
+    by the @mesh.llm decorator. The proxy handles the entire agentic loop internally:
+    - Tool formatting for provider (Claude, OpenAI, etc.)
+    - LLM API calls
+    - Tool execution via MCP proxies
+    - Response parsing to Pydantic models
+
+    The MeshLlmAgent is injected by the mesh framework and configured via the
+    @mesh.llm decorator. Users only need to call the proxy with their message.
+
+    Usage Example:
+        from pydantic import BaseModel
+        import mesh
+
+        class ChatResponse(BaseModel):
+            answer: str
+            confidence: float
+
+        @mesh.llm(
+            filter={"capability": "document", "tags": ["pdf"]},
+            provider="claude",
+            model="claude-3-5-sonnet-20241022"
+        )
+        @mesh.tool(capability="chat")
+        def chat(message: str, llm: MeshLlmAgent = None) -> ChatResponse:
+            # Optional: Override system prompt
+            llm.set_system_prompt("You are a helpful document assistant.")
+
+            # Execute automatic agentic loop
+            return llm(message)
+
+    Configuration Hierarchy:
+    - Decorator parameters provide defaults
+    - Environment variables override decorator settings:
+        * MESH_LLM_PROVIDER: Override provider
+        * MESH_LLM_MODEL: Override model
+        * ANTHROPIC_API_KEY: Claude API key
+        * OPENAI_API_KEY: OpenAI API key
+        * MESH_LLM_MAX_ITERATIONS: Override max iterations
+
+    The proxy is automatically injected with:
+    - Filtered tools from registry (based on @mesh.llm filter)
+    - Provider configuration (provider, model, api_key)
+    - Output type (inferred from function return annotation)
+    - System prompt (from decorator or file)
     """
 
-    # Basic compatibility with McpMeshAgent
-    def __call__(self, arguments: Optional[dict[str, Any]] = None) -> Any:
-        """Call the bound remote function (McpMeshAgent compatibility)."""
-        ...
-
-    def invoke(self, arguments: Optional[dict[str, Any]] = None) -> Any:
-        """Explicitly invoke the bound remote function (McpMeshAgent compatibility)."""
-        ...
-
-    # Vanilla MCP Protocol Methods (100% compatibility)
-    async def list_tools(self) -> list:
-        """List available tools from remote agent (vanilla MCP method)."""
-        ...
-
-    async def list_resources(self) -> list:
-        """List available resources from remote agent (vanilla MCP method)."""
-        ...
-
-    async def read_resource(self, uri: str) -> Any:
-        """Read resource contents from remote agent (vanilla MCP method)."""
-        ...
-
-    async def list_prompts(self) -> list:
-        """List available prompts from remote agent (vanilla MCP method)."""
-        ...
-
-    async def get_prompt(self, name: str, arguments: Optional[dict] = None) -> Any:
-        """Get prompt template from remote agent (vanilla MCP method)."""
-        ...
-
-    # Streaming Support - THE BREAKTHROUGH METHOD!
-    async def call_tool_streaming(
-        self, name: str, arguments: dict | None = None
-    ) -> AsyncIterator[dict]:
+    def set_system_prompt(self, prompt: str) -> None:
         """
-        Call a tool with streaming response using FastMCP's text/event-stream.
-
-        This enables multihop streaming (A→B→C chains) by leveraging FastMCP's
-        built-in streaming support with Accept: text/event-stream header.
+        Override the system prompt at runtime.
 
         Args:
-            name: Tool name to call
-            arguments: Tool arguments
-
-        Yields:
-            Streaming response chunks as dictionaries
-        """
-        ...
+            prompt: System prompt to use for LLM calls
 
-    # Phase 6: Explicit Session Management
-    async def create_session(self) -> str:
-        """
-        Create a new session and return session ID.
-
-        For Phase 6 explicit session management. In Phase 8, this will be
-        automated based on @mesh.tool(session_required=True) annotations.
-
-        Returns:
-            New session ID string
+        Example:
+            llm.set_system_prompt("You are an expert document analyst.")
         """
         ...
 
-    async def call_with_session(self, session_id: str, **kwargs) -> Any:
+    def __call__(self, message: str, **kwargs) -> Any:
         """
-        Call tool with explicit session ID for stateful operations.
+        Execute automatic agentic loop and return typed response.
 
-        This ensures all calls with the same session_id route to the same
-        agent instance for session affinity.
+        This method handles the complete agentic loop:
+        1. Format tools for provider (via LiteLLM)
+        2. Call LLM API with tools
+        3. If tool_use: execute via MCP proxies, loop back to LLM
+        4. If final response: parse into output type (Pydantic model)
+        5. Return typed response
 
         Args:
-            session_id: Session ID to include in request headers
-            **kwargs: Tool arguments to pass
+            message: User message to send to LLM
+            **kwargs: Additional context passed to LLM (provider-specific)
 
         Returns:
-            Tool response
-        """
-        ...
+            Pydantic model instance (type inferred from function return annotation)
 
-    async def close_session(self, session_id: str) -> bool:
-        """
-        Close session and cleanup session state.
-
-        Args:
-            session_id: Session ID to close
+        Raises:
+            MaxIterationsError: If max_iterations exceeded without final response
+            ValidationError: If LLM response doesn't match output type schema
+            ToolExecutionError: If tool execution fails during agentic loop
 
-        Returns:
-            True if session was closed successfully
+        Example:
+            response = llm("Analyze this document: /path/to/file.pdf")
+            # Returns ChatResponse(answer="...", confidence=0.95)
         """
         ...
 
@@ -299,13 +281,15 @@ class McpAgent(Protocol):
         handler: Any,
     ) -> core_schema.CoreSchema:
         """
-        Custom Pydantic core schema for McpAgent.
+        Custom Pydantic core schema for MeshLlmAgent.
+
+        This makes MeshLlmAgent parameters appear as optional/nullable in MCP schemas,
+        preventing serialization errors while maintaining type safety for dependency injection.
 
-        Similar to McpMeshAgent, this makes McpAgent parameters appear as
-        optional/nullable in MCP schemas, preventing serialization errors
-        while maintaining type safety for dependency injection.
+        The MeshLlmAgentInjector will replace None values with actual proxy objects
+        at runtime, so MCP callers never need to provide these parameters.
         """
-        # Treat McpAgent as an optional Any type for MCP serialization
+        # Treat MeshLlmAgent as an optional Any type for MCP serialization
        return core_schema.with_default_schema(
             core_schema.nullable_schema(core_schema.any_schema()),
             default=None,
@@ -320,3 +304,70 @@ class McpAgent(Protocol):
             "schema": {"type": "nullable", "schema": {"type": "any"}},
             "default": None,
         }
+
+
+# Import BaseModel for MeshContextModel
+try:
+    from pydantic import BaseModel
+
+    class MeshContextModel(BaseModel):
+        """
+        Base model for LLM prompt template contexts.
+
+        Use this to create type-safe, validated context models for
+        Jinja2 prompt templates in @mesh.llm decorated functions.
+
+        The MeshContextModel provides:
+        - Type safety via Pydantic validation
+        - Field descriptions for LLM schema generation
+        - Strict mode (extra fields forbidden)
+        - Automatic .model_dump() for template rendering
+
+        Example:
+            from mesh import MeshContextModel
+            from pydantic import Field
+
+            class ChatContext(MeshContextModel):
+                user_name: str = Field(description="Name of the user")
+                domain: str = Field(description="Chat domain: support, sales, etc.")
+                expertise_level: str = Field(
+                    default="beginner",
+                    description="User expertise: beginner, intermediate, expert"
+                )
+
+            @mesh.llm(
+                system_prompt="file://prompts/chat.jinja2",
+                context_param="ctx"
+            )
+            @mesh.tool(capability="chat")
+            def chat(message: str, ctx: ChatContext, llm: MeshLlmAgent = None):
+                return llm(message)  # Template auto-rendered with ctx!
+
+        Field Descriptions in LLM Chains:
+            When a specialist LLM agent has MeshContextModel parameters, the Field
+            descriptions are extracted and included in the tool schema sent to
+            calling LLM agents. This helps orchestrator LLMs construct context
+            objects correctly.
+
+            Without descriptions:
+                {"domain": "string"}  # LLM doesn't know what this means
+
+            With descriptions:
+                {"domain": {"type": "string", "description": "Chat domain: support, sales"}}
+                # LLM understands what to provide!
+
+        Template Rendering:
+            When used with @mesh.llm(system_prompt="file://..."), the context is
+            automatically converted to a dict via .model_dump() and passed to the
+            Jinja2 template renderer.
+        """
+
+        class Config:
+            extra = "forbid"  # Strict mode - reject unexpected fields
+
+except ImportError:
+    # Fallback if Pydantic not available (should not happen in practice)
+    class MeshContextModel:  # type: ignore
+        """Placeholder when Pydantic unavailable."""
+
+        pass
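
The MeshContextModel and template-style system prompts introduced above compose with @mesh.llm as sketched below. This is a hedged sketch based on the docstrings in this diff, assuming a prompts/chat.jinja2 template exists on disk and that the mesh runtime injects the llm parameter at call time; the template path and field names are illustrative, and the return annotation is omitted as in the docstring example.

    # Hedged sketch combining MeshContextModel with a file:// Jinja2 system prompt,
    # following the docstrings in this diff. The template path and fields are
    # illustrative; `llm` is injected by the mesh runtime.
    from pydantic import Field

    import mesh
    from mesh import MeshContextModel


    class ChatContext(MeshContextModel):
        user_name: str = Field(description="Name of the user")
        domain: str = Field(description="Chat domain: support, sales, etc.")


    @mesh.llm(
        system_prompt="file://prompts/chat.jinja2",  # file:// prefix marks a Jinja2 template
        context_param="ctx",  # the parameter whose .model_dump() feeds the template
    )
    @mesh.tool(capability="chat")
    def chat(message: str, ctx: ChatContext, llm: mesh.MeshLlmAgent = None):
        # The template is rendered with ctx before the agentic loop runs.
        return llm(message)
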