mcp-mesh 0.6.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/decorator_registry.py +26 -2
  3. _mcp_mesh/engine/dependency_injector.py +14 -1
  4. _mcp_mesh/engine/llm_config.py +11 -7
  5. _mcp_mesh/engine/mesh_llm_agent.py +247 -61
  6. _mcp_mesh/engine/mesh_llm_agent_injector.py +130 -0
  7. _mcp_mesh/engine/provider_handlers/__init__.py +20 -0
  8. _mcp_mesh/engine/provider_handlers/base_provider_handler.py +122 -0
  9. _mcp_mesh/engine/provider_handlers/claude_handler.py +138 -0
  10. _mcp_mesh/engine/provider_handlers/generic_handler.py +156 -0
  11. _mcp_mesh/engine/provider_handlers/openai_handler.py +163 -0
  12. _mcp_mesh/engine/provider_handlers/provider_handler_registry.py +167 -0
  13. _mcp_mesh/engine/response_parser.py +3 -38
  14. _mcp_mesh/engine/tool_schema_builder.py +3 -2
  15. _mcp_mesh/generated/.openapi-generator/FILES +3 -0
  16. _mcp_mesh/generated/.openapi-generator-ignore +0 -1
  17. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +51 -97
  18. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +42 -72
  19. _mcp_mesh/generated/mcp_mesh_registry_client/models/agent_info.py +11 -1
  20. _mcp_mesh/generated/mcp_mesh_registry_client/models/dependency_resolution_info.py +108 -0
  21. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_provider.py +95 -0
  22. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +37 -58
  23. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +32 -63
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +30 -29
  25. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +41 -59
  26. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +51 -98
  27. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +70 -85
  28. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +51 -84
  29. _mcp_mesh/generated/mcp_mesh_registry_client/models/resolved_llm_provider.py +112 -0
  30. _mcp_mesh/pipeline/mcp_heartbeat/fast_heartbeat_check.py +3 -3
  31. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_orchestrator.py +35 -10
  32. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +1 -1
  33. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +77 -39
  34. _mcp_mesh/pipeline/mcp_startup/fastapiserver_setup.py +118 -35
  35. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +1 -1
  36. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +48 -3
  37. _mcp_mesh/pipeline/mcp_startup/server_discovery.py +77 -48
  38. _mcp_mesh/pipeline/mcp_startup/startup_orchestrator.py +2 -2
  39. _mcp_mesh/shared/health_check_cache.py +246 -0
  40. _mcp_mesh/shared/registry_client_wrapper.py +29 -2
  41. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.1.dist-info}/METADATA +1 -1
  42. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.1.dist-info}/RECORD +48 -37
  43. mesh/__init__.py +12 -2
  44. mesh/decorators.py +105 -39
  45. mesh/helpers.py +259 -0
  46. mesh/types.py +53 -4
  47. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.1.dist-info}/WHEEL +0 -0
  48. {mcp_mesh-0.6.0.dist-info → mcp_mesh-0.6.1.dist-info}/licenses/LICENSE +0 -0
mesh/helpers.py ADDED
@@ -0,0 +1,259 @@
+"""
+Helper decorators for common mesh patterns.
+
+This module provides convenience decorators that build on top of the core
+mesh decorators to simplify common patterns like zero-code LLM providers.
+"""
+
+import logging
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+def llm_provider(
+    model: str,
+    capability: str = "llm",
+    tags: Optional[list[str]] = None,
+    version: str = "1.0.0",
+    **litellm_kwargs: Any,
+):
+    """
+    Zero-code LLM provider decorator.
+
+    Creates a mesh-registered LLM provider that automatically:
+    - Registers as MCP tool (@app.tool) for direct MCP calls
+    - Registers in mesh network (@mesh.tool) for dependency injection
+    - Wraps LiteLLM with standard MeshLlmRequest interface
+    - Returns the full LLM message dict (caller handles parsing)
+
+    The decorated function becomes a placeholder - the decorator generates
+    a process_chat(request: MeshLlmRequest) -> dict function that handles
+    all LLM provider logic.
+
+    Args:
+        model: LiteLLM model name (e.g., "anthropic/claude-sonnet-4-5")
+        capability: Capability name for mesh registration (default: "llm")
+        tags: Tags for mesh registration (e.g., ["claude", "fast", "+budget"])
+        version: Version string for mesh registration (default: "1.0.0")
+        **litellm_kwargs: Additional kwargs to pass to litellm.completion()
+
+    Usage:
+        from fastmcp import FastMCP
+        import mesh
+
+        app = FastMCP("LLM Provider")
+
+        @mesh.llm_provider(
+            model="anthropic/claude-sonnet-4-5",
+            capability="llm",
+            tags=["claude", "test"],
+            version="1.0.0",
+        )
+        def claude_provider():
+            '''Zero-code Claude provider.'''
+            pass  # Implementation is in the decorator
+
+        @mesh.agent(name="my-provider", auto_run=True)
+        class MyProviderAgent:
+            pass
+
+    The generated process_chat function signature:
+        def process_chat(request: MeshLlmRequest) -> dict[str, Any]:
+            '''
+            Auto-generated LLM handler.
+
+            Args:
+                request: MeshLlmRequest with messages, tools, model_params
+
+            Returns:
+                Full message dict with content, role, and tool_calls (if present)
+            '''
+
+    Testing:
+        # Direct MCP call
+        curl -X POST http://localhost:9019/mcp \\
+          -H "Content-Type: application/json" \\
+          -d '{
+            "jsonrpc": "2.0",
+            "id": 1,
+            "method": "tools/call",
+            "params": {
+              "name": "process_chat",
+              "arguments": {
+                "request": {
+                  "messages": [
+                    {"role": "system", "content": "You are helpful."},
+                    {"role": "user", "content": "Say hello."}
+                  ]
+                }
+              }
+            }
+          }'
+
+    Raises:
+        RuntimeError: If FastMCP 'app' not found in module
+        ImportError: If litellm not installed
+    """
+
+    def decorator(func):
+        # Import here to avoid circular imports
+        import sys
+
+        from mesh import tool
+        from mesh.types import MeshLlmRequest
+
+        # Find FastMCP app in current module
+        current_module = sys.modules.get(func.__module__)
+        if not current_module or not hasattr(current_module, "app"):
+            raise RuntimeError(
+                f"@mesh.llm_provider requires FastMCP 'app' in module {func.__module__}. "
+                f"Example: app = FastMCP('LLM Provider')"
+            )
+
+        app = current_module.app
+
+        # Extract vendor from model name using LiteLLM
+        vendor = "unknown"
+        try:
+            import litellm
+
+            _, vendor, _, _ = litellm.get_llm_provider(model=model)
+            logger.info(
+                f"✅ Extracted vendor '{vendor}' from model '{model}' "
+                f"using LiteLLM detection"
+            )
+        except (ImportError, AttributeError, ValueError, KeyError) as e:
+            # Fallback: try to extract from model prefix
+            # ImportError: litellm not installed
+            # AttributeError: get_llm_provider doesn't exist
+            # ValueError: invalid model format
+            # KeyError: model not in provider mapping
+            if "/" in model:
+                vendor = model.split("/")[0]
+                logger.warning(
+                    f"⚠️ Could not extract vendor using LiteLLM ({e}), "
+                    f"falling back to prefix extraction: '{vendor}'"
+                )
+            else:
+                logger.warning(
+                    f"⚠️ Could not extract vendor from model '{model}', "
+                    f"using 'unknown'"
+                )
+
+        # Generate the LLM handler function
+        def process_chat(request: MeshLlmRequest) -> dict[str, Any]:
+            """
+            Auto-generated LLM handler.
+
+            Args:
+                request: MeshLlmRequest with messages, tools, model_params
+
+            Returns:
+                Full message dict with content, role, and tool_calls (if present)
+            """
+            import litellm
+
+            # Build litellm.completion arguments
+            completion_args: dict[str, Any] = {
+                "model": model,
+                "messages": request.messages,
+                **litellm_kwargs,
+            }
+
+            # Add optional request parameters
+            if request.tools:
+                completion_args["tools"] = request.tools
+
+            if request.model_params:
+                completion_args.update(request.model_params)
+
+            # Call LiteLLM
+            try:
+                response = litellm.completion(**completion_args)
+                message = response.choices[0].message
+
+                # Build message dict with all necessary fields for agentic loop
+                # Handle content - it can be a string or list of content blocks
+                content = message.content
+                if isinstance(content, list):
+                    # Extract text from content blocks (robust handling)
+                    text_parts = []
+                    for block in content:
+                        if block is None:
+                            continue  # Skip None blocks
+                        elif isinstance(block, dict):
+                            # Extract text field, ensure it's a string
+                            text_value = block.get("text", "")
+                            text_parts.append(
+                                str(text_value) if text_value is not None else ""
+                            )
+                        else:
+                            # Convert any other type to string
+                            try:
+                                text_parts.append(str(block))
+                            except Exception:
+                                # If str() fails, skip this block
+                                logger.warning(
+                                    f"Unable to convert content block to string: {type(block)}"
+                                )
+                                continue
+                    content = "".join(text_parts)
+
+                message_dict: dict[str, Any] = {
+                    "role": message.role,
+                    "content": content if content else "",
+                }
+
+                # Include tool_calls if present (critical for agentic loop support!)
+                if hasattr(message, "tool_calls") and message.tool_calls:
+                    message_dict["tool_calls"] = [
+                        {
+                            "id": tc.id,
+                            "type": tc.type,
+                            "function": {
+                                "name": tc.function.name,
+                                "arguments": tc.function.arguments,
+                            },
+                        }
+                        for tc in message.tool_calls
+                    ]
+
+                logger.info(
+                    f"LLM provider {func.__name__} processed request "
+                    f"(model={model}, messages={len(request.messages)}, "
+                    f"tool_calls={len(message_dict.get('tool_calls', []))})"
+                )
+
+                return message_dict
+
+            except Exception as e:
+                logger.error(f"LLM provider {func.__name__} failed: {e}")
+                raise
+
+        # Preserve original function's docstring metadata
+        if func.__doc__:
+            process_chat.__doc__ = func.__doc__ + "\n\n" + (process_chat.__doc__ or "")
+
+        # CRITICAL: Apply @mesh.tool() FIRST (before FastMCP caches the function)
+        # This ensures mesh DI wrapper is in place when FastMCP caches the function
+        # Decorators are applied bottom-up, so mesh wrapper must be innermost
+        process_chat = tool(
+            capability=capability,
+            tags=tags,
+            version=version,
+            vendor=vendor,  # Pass vendor to registry for provider handler selection
+        )(process_chat)
+
+        # Then apply @app.tool() for MCP registration (caches the wrapped version)
+        process_chat = app.tool()(process_chat)
+
+        logger.info(
+            f"✅ Created LLM provider '{func.__name__}' -> process_chat "
+            f"(model={model}, capability={capability}, tags={tags}, vendor={vendor})"
+        )
+
+        # Return the generated function (replaces the placeholder)
+        return process_chat
+
+    return decorator
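
Note: the curl invocation embedded in the docstring above can be reproduced from Python. The sketch below is illustrative only, not part of the package; it assumes a provider agent from this release is listening on http://localhost:9019/mcp and simply mirrors the JSON-RPC payload shown in the docstring, using the requests library.

import requests

# Same endpoint and body as the curl example in the llm_provider docstring
payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
        "name": "process_chat",
        "arguments": {
            "request": {
                "messages": [
                    {"role": "system", "content": "You are helpful."},
                    {"role": "user", "content": "Say hello."},
                ]
            }
        },
    },
}

resp = requests.post("http://localhost:9019/mcp", json=payload)
resp.raise_for_status()
print(resp.text)  # JSON-RPC response wrapping the provider's message dict
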
mesh/types.py CHANGED
@@ -3,7 +3,8 @@ MCP Mesh type definitions for dependency injection.
 """

 from collections.abc import AsyncIterator
-from typing import Any, Optional, Protocol
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Protocol

 try:
     from pydantic_core import core_schema
@@ -243,7 +244,7 @@ class MeshLlmAgent(Protocol):
         """
         ...

-    def __call__(self, message: str, **kwargs) -> Any:
+    def __call__(self, message: str | list[dict[str, Any]], **kwargs) -> Any:
         """
         Execute automatic agentic loop and return typed response.

@@ -255,7 +256,10 @@ class MeshLlmAgent(Protocol):
         5. Return typed response

         Args:
-            message: User message to send to LLM
+            message: Either:
+                - str: Single user message (will be wrapped in messages array)
+                - list[dict]: Full conversation history with messages in format
+                  [{"role": "user|assistant|system", "content": "..."}]
             **kwargs: Additional context passed to LLM (provider-specific)

         Returns:
@@ -266,9 +270,18 @@ class MeshLlmAgent(Protocol):
             ValidationError: If LLM response doesn't match output type schema
             ToolExecutionError: If tool execution fails during agentic loop

-        Example:
+        Example (single-turn):
             response = llm("Analyze this document: /path/to/file.pdf")
             # Returns ChatResponse(answer="...", confidence=0.95)
+
+        Example (multi-turn):
+            messages = [
+                {"role": "user", "content": "Hello, I need help with Python."},
+                {"role": "assistant", "content": "I'd be happy to help! What do you need?"},
+                {"role": "user", "content": "How do I read a file?"}
+            ]
+            response = llm(messages)
+            # Returns ChatResponse with contextual answer
         """
         ...

@@ -371,3 +384,39 @@ except ImportError:
         """Placeholder when Pydantic unavailable."""

         pass
+
+
+@dataclass
+class MeshLlmRequest:
+    """
+    Standard LLM request format for mesh-delegated LLM calls.
+
+    This dataclass is used when delegating LLM calls to mesh-registered LLM provider
+    agents via @mesh.llm_provider. It standardizes the request format across the mesh.
+
+    Usage:
+        Provider side (automatic with @mesh.llm_provider):
+            @mesh.llm_provider(model="anthropic/claude-sonnet-4-5", capability="llm")
+            def claude_provider():
+                pass  # Automatically handles MeshLlmRequest
+
+        Consumer side (future with provider=dict):
+            @mesh.llm(provider={"capability": "llm", "tags": ["claude"]})
+            def chat(message: str, llm: MeshLlmAgent = None):
+                return llm(message)  # Converts to MeshLlmRequest internally
+
+    Attributes:
+        messages: List of message dicts with "role" and "content" keys (and optionally "tool_calls")
+        tools: Optional list of tool definitions (MCP format)
+        model_params: Optional parameters to pass to the model (temperature, max_tokens, etc.)
+        context: Optional arbitrary context data for debugging/tracing
+        request_id: Optional request ID for tracking
+        caller_agent: Optional agent name that initiated the request
+    """
+
+    messages: List[Dict[str, Any]]  # Changed from Dict[str, str] to allow tool_calls
+    tools: Optional[List[Dict]] = None
+    model_params: Optional[Dict] = None
+    context: Optional[Dict] = None
+    request_id: Optional[str] = None
+    caller_agent: Optional[str] = None
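
Taken together, MeshLlmRequest and the message dict returned by the generated process_chat define the round-trip contract for an agentic loop. The sketch below shows one plausible caller-side step under that contract; it is illustrative only: provider stands in for an injected mesh tool and dispatch_tool is a hypothetical helper, and neither name comes from this release.

import json
from typing import Any, Callable

from mesh.types import MeshLlmRequest


def run_step(
    provider: Callable[[MeshLlmRequest], dict[str, Any]],  # e.g. an injected mesh tool
    dispatch_tool: Callable[[str, dict[str, Any]], Any],   # hypothetical tool executor
    request: MeshLlmRequest,
) -> dict[str, Any]:
    """One round-trip: call the provider, then execute any requested tools."""
    reply = provider(request)
    tool_calls = reply.get("tool_calls", [])
    if tool_calls:
        # Record the assistant turn first so the next call sees it in history
        request.messages.append(reply)
        for tc in tool_calls:
            # LiteLLM serializes function arguments as a JSON-encoded string
            args = json.loads(tc["function"]["arguments"])
            result = dispatch_tool(tc["function"]["name"], args)
            request.messages.append(
                {"role": "tool", "tool_call_id": tc["id"], "content": str(result)}
            )
    return reply

Repeating this step until the reply carries no tool_calls leaves the final answer in reply["content"], matching the agentic-loop sequence described in the MeshLlmAgent protocol above.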