mcp-mesh 0.5.7__py3-none-any.whl → 0.6.1__py3-none-any.whl

Files changed (57)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/base_injector.py +171 -0
  3. _mcp_mesh/engine/decorator_registry.py +162 -35
  4. _mcp_mesh/engine/dependency_injector.py +105 -19
  5. _mcp_mesh/engine/http_wrapper.py +5 -22
  6. _mcp_mesh/engine/llm_config.py +45 -0
  7. _mcp_mesh/engine/llm_errors.py +115 -0
  8. _mcp_mesh/engine/mesh_llm_agent.py +626 -0
  9. _mcp_mesh/engine/mesh_llm_agent_injector.py +617 -0
  10. _mcp_mesh/engine/provider_handlers/__init__.py +20 -0
  11. _mcp_mesh/engine/provider_handlers/base_provider_handler.py +122 -0
  12. _mcp_mesh/engine/provider_handlers/claude_handler.py +138 -0
  13. _mcp_mesh/engine/provider_handlers/generic_handler.py +156 -0
  14. _mcp_mesh/engine/provider_handlers/openai_handler.py +163 -0
  15. _mcp_mesh/engine/provider_handlers/provider_handler_registry.py +167 -0
  16. _mcp_mesh/engine/response_parser.py +205 -0
  17. _mcp_mesh/engine/signature_analyzer.py +229 -99
  18. _mcp_mesh/engine/tool_executor.py +169 -0
  19. _mcp_mesh/engine/tool_schema_builder.py +126 -0
  20. _mcp_mesh/engine/unified_mcp_proxy.py +14 -12
  21. _mcp_mesh/generated/.openapi-generator/FILES +7 -0
  22. _mcp_mesh/generated/.openapi-generator-ignore +0 -1
  23. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +7 -16
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +7 -0
  25. _mcp_mesh/generated/mcp_mesh_registry_client/models/agent_info.py +11 -1
  26. _mcp_mesh/generated/mcp_mesh_registry_client/models/dependency_resolution_info.py +108 -0
  27. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_provider.py +95 -0
  28. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +111 -0
  29. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +141 -0
  30. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +93 -0
  31. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +103 -0
  32. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +1 -1
  33. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +35 -1
  34. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +11 -1
  35. _mcp_mesh/generated/mcp_mesh_registry_client/models/resolved_llm_provider.py +112 -0
  36. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +9 -72
  37. _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +3 -3
  38. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +35 -10
  39. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +7 -4
  40. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +260 -0
  41. _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +118 -35
  42. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +8 -1
  43. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +111 -5
  44. _mcp_mesh/pipeline/mcp_startup/server_discovery.py +77 -48
  45. _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +2 -2
  46. _mcp_mesh/pipeline/mcp_startup/startup_pipeline.py +2 -2
  47. _mcp_mesh/shared/health_check_cache.py +246 -0
  48. _mcp_mesh/shared/registry_client_wrapper.py +87 -4
  49. _mcp_mesh/utils/fastmcp_schema_extractor.py +476 -0
  50. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.1.dist-info}/METADATA +1 -1
  51. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.1.dist-info}/RECORD +57 -32
  52. mesh/__init__.py +18 -4
  53. mesh/decorators.py +439 -31
  54. mesh/helpers.py +259 -0
  55. mesh/types.py +197 -97
  56. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.1.dist-info}/WHEEL +0 -0
  57. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.1.dist-info}/licenses/LICENSE +0 -0
mesh/helpers.py ADDED
@@ -0,0 +1,259 @@
+ """
+ Helper decorators for common mesh patterns.
+
+ This module provides convenience decorators that build on top of the core
+ mesh decorators to simplify common patterns like zero-code LLM providers.
+ """
+
+ import logging
+ from typing import Any, Dict, List, Optional
+
+ logger = logging.getLogger(__name__)
+
+
+ def llm_provider(
+     model: str,
+     capability: str = "llm",
+     tags: Optional[list[str]] = None,
+     version: str = "1.0.0",
+     **litellm_kwargs: Any,
+ ):
+     """
+     Zero-code LLM provider decorator.
+
+     Creates a mesh-registered LLM provider that automatically:
+     - Registers as MCP tool (@app.tool) for direct MCP calls
+     - Registers in mesh network (@mesh.tool) for dependency injection
+     - Wraps LiteLLM with standard MeshLlmRequest interface
+     - Returns the full assistant message dict (caller handles parsing)
+
+     The decorated function becomes a placeholder - the decorator generates
+     a process_chat(request: MeshLlmRequest) -> dict function that handles
+     all LLM provider logic.
+
+     Args:
+         model: LiteLLM model name (e.g., "anthropic/claude-sonnet-4-5")
+         capability: Capability name for mesh registration (default: "llm")
+         tags: Tags for mesh registration (e.g., ["claude", "fast", "+budget"])
+         version: Version string for mesh registration (default: "1.0.0")
+         **litellm_kwargs: Additional kwargs to pass to litellm.completion()
+
+     Usage:
+         from fastmcp import FastMCP
+         import mesh
+
+         app = FastMCP("LLM Provider")
+
+         @mesh.llm_provider(
+             model="anthropic/claude-sonnet-4-5",
+             capability="llm",
+             tags=["claude", "test"],
+             version="1.0.0",
+         )
+         def claude_provider():
+             '''Zero-code Claude provider.'''
+             pass  # Implementation is in the decorator
+
+         @mesh.agent(name="my-provider", auto_run=True)
+         class MyProviderAgent:
+             pass
+
+     The generated process_chat function signature:
+         def process_chat(request: MeshLlmRequest) -> dict[str, Any]:
+             '''
+             Auto-generated LLM handler.
+
+             Args:
+                 request: MeshLlmRequest with messages, tools, model_params
+
+             Returns:
+                 Full message dict with content, role, and tool_calls (if present)
+             '''
+
+     Testing:
+         # Direct MCP call
+         curl -X POST http://localhost:9019/mcp \\
+           -H "Content-Type: application/json" \\
+           -d '{
+             "jsonrpc": "2.0",
+             "id": 1,
+             "method": "tools/call",
+             "params": {
+               "name": "process_chat",
+               "arguments": {
+                 "request": {
+                   "messages": [
+                     {"role": "system", "content": "You are helpful."},
+                     {"role": "user", "content": "Say hello."}
+                   ]
+                 }
+               }
+             }
+           }'
+
+     Raises:
+         RuntimeError: If FastMCP 'app' not found in module
+         ImportError: If litellm not installed
+     """
+
+     def decorator(func):
+         # Import here to avoid circular imports
+         import sys
+
+         from mesh import tool
+         from mesh.types import MeshLlmRequest
+
+         # Find FastMCP app in current module
+         current_module = sys.modules.get(func.__module__)
+         if not current_module or not hasattr(current_module, "app"):
+             raise RuntimeError(
+                 f"@mesh.llm_provider requires FastMCP 'app' in module {func.__module__}. "
+                 f"Example: app = FastMCP('LLM Provider')"
+             )
+
+         app = current_module.app
+
+         # Extract vendor from model name using LiteLLM
+         vendor = "unknown"
+         try:
+             import litellm
+
+             _, vendor, _, _ = litellm.get_llm_provider(model=model)
+             logger.info(
+                 f"✅ Extracted vendor '{vendor}' from model '{model}' "
+                 f"using LiteLLM detection"
+             )
+         except (ImportError, AttributeError, ValueError, KeyError) as e:
+             # Fallback: try to extract from model prefix
+             # ImportError: litellm not installed
+             # AttributeError: get_llm_provider doesn't exist
+             # ValueError: invalid model format
+             # KeyError: model not in provider mapping
+             if "/" in model:
+                 vendor = model.split("/")[0]
+                 logger.warning(
+                     f"⚠️ Could not extract vendor using LiteLLM ({e}), "
+                     f"falling back to prefix extraction: '{vendor}'"
+                 )
+             else:
+                 logger.warning(
+                     f"⚠️ Could not extract vendor from model '{model}', "
+                     f"using 'unknown'"
+                 )
+
+         # Generate the LLM handler function
+         def process_chat(request: MeshLlmRequest) -> dict[str, Any]:
+             """
+             Auto-generated LLM handler.
+
+             Args:
+                 request: MeshLlmRequest with messages, tools, model_params
+
+             Returns:
+                 Full message dict with content, role, and tool_calls (if present)
+             """
+             import litellm
+
+             # Build litellm.completion arguments
+             completion_args: dict[str, Any] = {
+                 "model": model,
+                 "messages": request.messages,
+                 **litellm_kwargs,
+             }
+
+             # Add optional request parameters
+             if request.tools:
+                 completion_args["tools"] = request.tools
+
+             if request.model_params:
+                 completion_args.update(request.model_params)
+
+             # Call LiteLLM
+             try:
+                 response = litellm.completion(**completion_args)
+                 message = response.choices[0].message
+
+                 # Build message dict with all necessary fields for agentic loop
+                 # Handle content - it can be a string or list of content blocks
+                 content = message.content
+                 if isinstance(content, list):
+                     # Extract text from content blocks (robust handling)
+                     text_parts = []
+                     for block in content:
+                         if block is None:
+                             continue  # Skip None blocks
+                         elif isinstance(block, dict):
+                             # Extract text field, ensure it's a string
+                             text_value = block.get("text", "")
+                             text_parts.append(
+                                 str(text_value) if text_value is not None else ""
+                             )
+                         else:
+                             # Convert any other type to string
+                             try:
+                                 text_parts.append(str(block))
+                             except Exception:
+                                 # If str() fails, skip this block
+                                 logger.warning(
+                                     f"Unable to convert content block to string: {type(block)}"
+                                 )
+                                 continue
+                     content = "".join(text_parts)
+
+                 message_dict: dict[str, Any] = {
+                     "role": message.role,
+                     "content": content if content else "",
+                 }
+
+                 # Include tool_calls if present (critical for agentic loop support!)
+                 if hasattr(message, "tool_calls") and message.tool_calls:
+                     message_dict["tool_calls"] = [
+                         {
+                             "id": tc.id,
+                             "type": tc.type,
+                             "function": {
+                                 "name": tc.function.name,
+                                 "arguments": tc.function.arguments,
+                             },
+                         }
+                         for tc in message.tool_calls
+                     ]
+
+                 logger.info(
+                     f"LLM provider {func.__name__} processed request "
+                     f"(model={model}, messages={len(request.messages)}, "
+                     f"tool_calls={len(message_dict.get('tool_calls', []))})"
+                 )
+
+                 return message_dict
+
+             except Exception as e:
+                 logger.error(f"LLM provider {func.__name__} failed: {e}")
+                 raise
+
+         # Preserve original function's docstring metadata
+         if func.__doc__:
+             process_chat.__doc__ = func.__doc__ + "\n\n" + (process_chat.__doc__ or "")
+
+         # CRITICAL: Apply @mesh.tool() FIRST (before FastMCP caches the function)
+         # This ensures mesh DI wrapper is in place when FastMCP caches the function
+         # Decorators are applied bottom-up, so mesh wrapper must be innermost
+         process_chat = tool(
+             capability=capability,
+             tags=tags,
+             version=version,
+             vendor=vendor,  # Pass vendor to registry for provider handler selection
+         )(process_chat)
+
+         # Then apply @app.tool() for MCP registration (caches the wrapped version)
+         process_chat = app.tool()(process_chat)
+
+         logger.info(
+             f"✅ Created LLM provider '{func.__name__}' -> process_chat "
+             f"(model={model}, capability={capability}, tags={tags}, vendor={vendor})"
+         )
+
+         # Return the generated function (replaces the placeholder)
+         return process_chat
+
+     return decorator
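
A note on the generated handler's return shape: as a minimal illustration (not part of the diff; all ids, names, and values below are made up), the dict built by process_chat follows the OpenAI/LiteLLM message format, so a caller driving an agentic loop can expect something like:

    # Illustrative only - ids, tool names, and argument values are invented.
    assistant_message = {
        "role": "assistant",
        "content": "",  # may be empty when the model only requests tool calls
        "tool_calls": [  # present only if the model requested tool use
            {
                "id": "call_abc123",
                "type": "function",
                "function": {
                    "name": "read_document",  # hypothetical tool name
                    "arguments": '{"path": "report.pdf"}',  # JSON-encoded string
                },
            }
        ],
    }

When "tool_calls" is present, the caller executes the tools, appends the results to the conversation, and re-invokes the provider; otherwise "content" holds the final answer.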
mesh/types.py CHANGED
@@ -3,7 +3,8 @@ MCP Mesh type definitions for dependency injection.
  """
 
  from collections.abc import AsyncIterator
- from typing import Any, Optional, Protocol
+ from dataclasses import dataclass
+ from typing import Any, Dict, List, Optional, Protocol
 
  try:
      from pydantic_core import core_schema
@@ -180,113 +181,107 @@ class McpMeshAgent(Protocol):
  }
 
 
- class McpAgent(Protocol):
+ class MeshLlmAgent(Protocol):
      """
-     DEPRECATED: Use McpMeshAgent instead.
-
-     This type has been unified with McpMeshAgent. All features previously exclusive
-     to McpAgent are now available in McpMeshAgent using FastMCP's superior client.
-
-     Migration:
-         # Old way (deprecated)
-         def process_files(file_service: McpAgent) -> str:
-             pass
-
-         # New way (recommended)
-         def process_files(file_service: McpMeshAgent) -> str:
-             pass
-
-     McpMeshAgent now provides all MCP protocol features including streaming,
-     session management, and CallToolResult objects via FastMCP client.
+     LLM agent proxy with automatic agentic loop.
+
+     This protocol defines the interface for LLM agents that are automatically injected
+     by the @mesh.llm decorator. The proxy handles the entire agentic loop internally:
+     - Tool formatting for provider (Claude, OpenAI, etc.)
+     - LLM API calls
+     - Tool execution via MCP proxies
+     - Response parsing to Pydantic models
+
+     The MeshLlmAgent is injected by the mesh framework and configured via the
+     @mesh.llm decorator. Users only need to call the proxy with their message.
+
+     Usage Example:
+         from pydantic import BaseModel
+         import mesh
+
+         class ChatResponse(BaseModel):
+             answer: str
+             confidence: float
+
+         @mesh.llm(
+             filter={"capability": "document", "tags": ["pdf"]},
+             provider="claude",
+             model="claude-3-5-sonnet-20241022"
+         )
+         @mesh.tool(capability="chat")
+         def chat(message: str, llm: MeshLlmAgent = None) -> ChatResponse:
+             # Optional: Override system prompt
+             llm.set_system_prompt("You are a helpful document assistant.")
+
+             # Execute automatic agentic loop
+             return llm(message)
+
+     Configuration Hierarchy:
+         - Decorator parameters provide defaults
+         - Environment variables override decorator settings:
+             * MESH_LLM_PROVIDER: Override provider
+             * MESH_LLM_MODEL: Override model
+             * ANTHROPIC_API_KEY: Claude API key
+             * OPENAI_API_KEY: OpenAI API key
+             * MESH_LLM_MAX_ITERATIONS: Override max iterations
+
+     The proxy is automatically injected with:
+         - Filtered tools from registry (based on @mesh.llm filter)
+         - Provider configuration (provider, model, api_key)
+         - Output type (inferred from function return annotation)
+         - System prompt (from decorator or file)
      """
 
-     # Basic compatibility with McpMeshAgent
-     def __call__(self, arguments: Optional[dict[str, Any]] = None) -> Any:
-         """Call the bound remote function (McpMeshAgent compatibility)."""
-         ...
-
-     def invoke(self, arguments: Optional[dict[str, Any]] = None) -> Any:
-         """Explicitly invoke the bound remote function (McpMeshAgent compatibility)."""
-         ...
-
-     # Vanilla MCP Protocol Methods (100% compatibility)
-     async def list_tools(self) -> list:
-         """List available tools from remote agent (vanilla MCP method)."""
-         ...
-
-     async def list_resources(self) -> list:
-         """List available resources from remote agent (vanilla MCP method)."""
-         ...
-
-     async def read_resource(self, uri: str) -> Any:
-         """Read resource contents from remote agent (vanilla MCP method)."""
-         ...
-
-     async def list_prompts(self) -> list:
-         """List available prompts from remote agent (vanilla MCP method)."""
-         ...
-
-     async def get_prompt(self, name: str, arguments: Optional[dict] = None) -> Any:
-         """Get prompt template from remote agent (vanilla MCP method)."""
-         ...
-
-     # Streaming Support - THE BREAKTHROUGH METHOD!
-     async def call_tool_streaming(
-         self, name: str, arguments: dict | None = None
-     ) -> AsyncIterator[dict]:
+     def set_system_prompt(self, prompt: str) -> None:
          """
-         Call a tool with streaming response using FastMCP's text/event-stream.
-
-         This enables multihop streaming (A→B→C chains) by leveraging FastMCP's
-         built-in streaming support with Accept: text/event-stream header.
+         Override the system prompt at runtime.
 
          Args:
-             name: Tool name to call
-             arguments: Tool arguments
-
-         Yields:
-             Streaming response chunks as dictionaries
-         """
-         ...
-
-     # Phase 6: Explicit Session Management
-     async def create_session(self) -> str:
-         """
-         Create a new session and return session ID.
+             prompt: System prompt to use for LLM calls
 
-         For Phase 6 explicit session management. In Phase 8, this will be
-         automated based on @mesh.tool(session_required=True) annotations.
-
-         Returns:
-             New session ID string
+         Example:
+             llm.set_system_prompt("You are an expert document analyst.")
          """
          ...
 
-     async def call_with_session(self, session_id: str, **kwargs) -> Any:
-         """
-         Call tool with explicit session ID for stateful operations.
-
-         This ensures all calls with the same session_id route to the same
-         agent instance for session affinity.
-
-         Args:
-             session_id: Session ID to include in request headers
-             **kwargs: Tool arguments to pass
-
-         Returns:
-             Tool response
+     def __call__(self, message: str | list[dict[str, Any]], **kwargs) -> Any:
          """
-         ...
+         Execute automatic agentic loop and return typed response.
 
-     async def close_session(self, session_id: str) -> bool:
-         """
-         Close session and cleanup session state.
+         This method handles the complete agentic loop:
+         1. Format tools for provider (via LiteLLM)
+         2. Call LLM API with tools
+         3. If tool_use: execute via MCP proxies, loop back to LLM
+         4. If final response: parse into output type (Pydantic model)
+         5. Return typed response
 
          Args:
-             session_id: Session ID to close
+             message: Either:
+                 - str: Single user message (will be wrapped in messages array)
+                 - list[dict]: Full conversation history with messages in format
+                   [{"role": "user|assistant|system", "content": "..."}]
+             **kwargs: Additional context passed to LLM (provider-specific)
 
          Returns:
-             True if session was closed successfully
+             Pydantic model instance (type inferred from function return annotation)
+
+         Raises:
+             MaxIterationsError: If max_iterations exceeded without final response
+             ValidationError: If LLM response doesn't match output type schema
+             ToolExecutionError: If tool execution fails during agentic loop
+
+         Example (single-turn):
+             response = llm("Analyze this document: /path/to/file.pdf")
+             # Returns ChatResponse(answer="...", confidence=0.95)
+
+         Example (multi-turn):
+             messages = [
+                 {"role": "user", "content": "Hello, I need help with Python."},
+                 {"role": "assistant", "content": "I'd be happy to help! What do you need?"},
+                 {"role": "user", "content": "How do I read a file?"}
+             ]
+             response = llm(messages)
+             # Returns ChatResponse with contextual answer
          """
          ...
 
@@ -299,13 +294,15 @@ class McpAgent(Protocol):
          handler: Any,
      ) -> core_schema.CoreSchema:
          """
-         Custom Pydantic core schema for McpAgent.
+         Custom Pydantic core schema for MeshLlmAgent.
 
-         Similar to McpMeshAgent, this makes McpAgent parameters appear as
-         optional/nullable in MCP schemas, preventing serialization errors
-         while maintaining type safety for dependency injection.
+         This makes MeshLlmAgent parameters appear as optional/nullable in MCP schemas,
+         preventing serialization errors while maintaining type safety for dependency injection.
+
+         The MeshLlmAgentInjector will replace None values with actual proxy objects
+         at runtime, so MCP callers never need to provide these parameters.
          """
-         # Treat McpAgent as an optional Any type for MCP serialization
+         # Treat MeshLlmAgent as an optional Any type for MCP serialization
          return core_schema.with_default_schema(
              core_schema.nullable_schema(core_schema.any_schema()),
              default=None,
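
As an aside on the hunk above: a minimal standalone sketch (assuming Pydantic v2; InjectedOnly is a made-up stand-in, not part of the package) of how a nullable-any core schema with a None default lets callers omit the parameter entirely:

    # Sketch only: mirrors the core-schema trick used by MeshLlmAgent above.
    from pydantic import TypeAdapter
    from pydantic_core import core_schema

    class InjectedOnly:  # hypothetical stand-in for MeshLlmAgent
        @classmethod
        def __get_pydantic_core_schema__(cls, source_type, handler):
            # Optional/nullable "any" with default None, as in the diff above
            return core_schema.with_default_schema(
                core_schema.nullable_schema(core_schema.any_schema()),
                default=None,
            )

    # None validates cleanly, so MCP callers can omit the parameter and the
    # injector replaces the None with a live proxy at runtime.
    assert TypeAdapter(InjectedOnly).validate_python(None) is None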
@@ -320,3 +317,106 @@ class McpAgent(Protocol):
      "schema": {"type": "nullable", "schema": {"type": "any"}},
      "default": None,
  }
+
+
+ # Import BaseModel for MeshContextModel
+ try:
+     from pydantic import BaseModel
+
+     class MeshContextModel(BaseModel):
+         """
+         Base model for LLM prompt template contexts.
+
+         Use this to create type-safe, validated context models for
+         Jinja2 prompt templates in @mesh.llm decorated functions.
+
+         The MeshContextModel provides:
+         - Type safety via Pydantic validation
+         - Field descriptions for LLM schema generation
+         - Strict mode (extra fields forbidden)
+         - Automatic .model_dump() for template rendering
+
+         Example:
+             from mesh import MeshContextModel
+             from pydantic import Field
+
+             class ChatContext(MeshContextModel):
+                 user_name: str = Field(description="Name of the user")
+                 domain: str = Field(description="Chat domain: support, sales, etc.")
+                 expertise_level: str = Field(
+                     default="beginner",
+                     description="User expertise: beginner, intermediate, expert"
+                 )
+
+             @mesh.llm(
+                 system_prompt="file://prompts/chat.jinja2",
+                 context_param="ctx"
+             )
+             @mesh.tool(capability="chat")
+             def chat(message: str, ctx: ChatContext, llm: MeshLlmAgent = None):
+                 return llm(message)  # Template auto-rendered with ctx!
+
+         Field Descriptions in LLM Chains:
+             When a specialist LLM agent has MeshContextModel parameters, the Field
+             descriptions are extracted and included in the tool schema sent to
+             calling LLM agents. This helps orchestrator LLMs construct context
+             objects correctly.
+
+             Without descriptions:
+                 {"domain": "string"}  # LLM doesn't know what this means
+
+             With descriptions:
+                 {"domain": {"type": "string", "description": "Chat domain: support, sales"}}
+                 # LLM understands what to provide!
+
+         Template Rendering:
+             When used with @mesh.llm(system_prompt="file://..."), the context is
+             automatically converted to a dict via .model_dump() and passed to the
+             Jinja2 template renderer.
+         """
+
+         class Config:
+             extra = "forbid"  # Strict mode - reject unexpected fields
+
+ except ImportError:
+     # Fallback if Pydantic not available (should not happen in practice)
+     class MeshContextModel:  # type: ignore
+         """Placeholder when Pydantic unavailable."""
+
+         pass
+
+
+ @dataclass
+ class MeshLlmRequest:
+     """
+     Standard LLM request format for mesh-delegated LLM calls.
+
+     This dataclass is used when delegating LLM calls to mesh-registered LLM provider
+     agents via @mesh.llm_provider. It standardizes the request format across the mesh.
+
+     Usage:
+         Provider side (automatic with @mesh.llm_provider):
+             @mesh.llm_provider(model="anthropic/claude-sonnet-4-5", capability="llm")
+             def claude_provider():
+                 pass  # Automatically handles MeshLlmRequest
+
+         Consumer side (future with provider=dict):
+             @mesh.llm(provider={"capability": "llm", "tags": ["claude"]})
+             def chat(message: str, llm: MeshLlmAgent = None):
+                 return llm(message)  # Converts to MeshLlmRequest internally
+
+     Attributes:
+         messages: List of message dicts with "role" and "content" keys (and optionally "tool_calls")
+         tools: Optional list of tool definitions (MCP format)
+         model_params: Optional parameters to pass to the model (temperature, max_tokens, etc.)
+         context: Optional arbitrary context data for debugging/tracing
+         request_id: Optional request ID for tracking
+         caller_agent: Optional agent name that initiated the request
+     """
+
+     messages: List[Dict[str, Any]]  # Changed from Dict[str, str] to allow tool_calls
+     tools: Optional[List[Dict]] = None
+     model_params: Optional[Dict] = None
+     context: Optional[Dict] = None
+     request_id: Optional[str] = None
+     caller_agent: Optional[str] = None
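
To make the request contract concrete, here is a minimal construction sketch (not from the package; all values are illustrative, and the "tool"-role history entry follows the OpenAI-style convention that LiteLLM accepts):

    # Illustrative only: a delegated request carrying one prior tool round-trip.
    from mesh.types import MeshLlmRequest

    request = MeshLlmRequest(
        messages=[
            {"role": "system", "content": "You are a document assistant."},
            {"role": "user", "content": "Summarize report.pdf"},
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [{
                    "id": "call_1",
                    "type": "function",
                    "function": {"name": "read_pdf", "arguments": '{"path": "report.pdf"}'},
                }],
            },
            {"role": "tool", "tool_call_id": "call_1", "content": "...extracted text..."},
        ],
        model_params={"temperature": 0.0, "max_tokens": 512},
        request_id="req-42",          # optional tracing field
        caller_agent="orchestrator",  # optional tracing field
    )

Only messages is required; tools, model_params, context, request_id, and caller_agent all default to None.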