mcp-mesh 0.5.7__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (39)
  1. _mcp_mesh/__init__.py +1 -1
  2. _mcp_mesh/engine/base_injector.py +171 -0
  3. _mcp_mesh/engine/decorator_registry.py +136 -33
  4. _mcp_mesh/engine/dependency_injector.py +91 -18
  5. _mcp_mesh/engine/http_wrapper.py +5 -22
  6. _mcp_mesh/engine/llm_config.py +41 -0
  7. _mcp_mesh/engine/llm_errors.py +115 -0
  8. _mcp_mesh/engine/mesh_llm_agent.py +440 -0
  9. _mcp_mesh/engine/mesh_llm_agent_injector.py +487 -0
  10. _mcp_mesh/engine/response_parser.py +240 -0
  11. _mcp_mesh/engine/signature_analyzer.py +229 -99
  12. _mcp_mesh/engine/tool_executor.py +169 -0
  13. _mcp_mesh/engine/tool_schema_builder.py +125 -0
  14. _mcp_mesh/engine/unified_mcp_proxy.py +14 -12
  15. _mcp_mesh/generated/.openapi-generator/FILES +4 -0
  16. _mcp_mesh/generated/mcp_mesh_registry_client/__init__.py +81 -44
  17. _mcp_mesh/generated/mcp_mesh_registry_client/models/__init__.py +72 -35
  18. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter.py +132 -0
  19. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner.py +172 -0
  20. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_filter_filter_inner_one_of.py +92 -0
  21. _mcp_mesh/generated/mcp_mesh_registry_client/models/llm_tool_info.py +121 -0
  22. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_agent_registration.py +98 -51
  23. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_registration_response.py +93 -44
  24. _mcp_mesh/generated/mcp_mesh_registry_client/models/mesh_tool_registration.py +84 -41
  25. _mcp_mesh/pipeline/api_heartbeat/api_dependency_resolution.py +9 -72
  26. _mcp_mesh/pipeline/mcp_heartbeat/heartbeat_pipeline.py +6 -3
  27. _mcp_mesh/pipeline/mcp_heartbeat/llm_tools_resolution.py +222 -0
  28. _mcp_mesh/pipeline/mcp_startup/fastmcpserver_discovery.py +7 -0
  29. _mcp_mesh/pipeline/mcp_startup/heartbeat_preparation.py +65 -4
  30. _mcp_mesh/pipeline/mcp_startup/startup_pipeline.py +2 -2
  31. _mcp_mesh/shared/registry_client_wrapper.py +60 -4
  32. _mcp_mesh/utils/fastmcp_schema_extractor.py +476 -0
  33. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/METADATA +1 -1
  34. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/RECORD +39 -25
  35. mesh/__init__.py +8 -4
  36. mesh/decorators.py +344 -2
  37. mesh/types.py +145 -94
  38. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/WHEEL +0 -0
  39. {mcp_mesh-0.5.7.dist-info → mcp_mesh-0.6.0.dist-info}/licenses/LICENSE +0 -0
_mcp_mesh/engine/mesh_llm_agent.py (new file)
@@ -0,0 +1,440 @@
+ """
+ MeshLlmAgent proxy implementation.
+
+ Provides automatic agentic loop for LLM-based agents with tool integration.
+ """
+
+ import asyncio
+ import json
+ import logging
+ from pathlib import Path
+ from typing import Any, Optional
+
+ from pydantic import BaseModel
+
+ from .llm_config import LLMConfig
+ from .llm_errors import (
+     LLMAPIError,
+     MaxIterationsError,
+     ResponseParseError,
+     ToolExecutionError,
+ )
+ from .response_parser import ResponseParser
+ from .tool_executor import ToolExecutor
+ from .tool_schema_builder import ToolSchemaBuilder
+
+ # Import Jinja2 for template rendering
+ try:
+     from jinja2 import Environment, FileSystemLoader, Template, TemplateSyntaxError
+ except ImportError:
+     Environment = None
+     FileSystemLoader = None
+     Template = None
+     TemplateSyntaxError = None
+
+ # Import litellm at module level for mocking in tests
+ try:
+     from litellm import completion
+ except ImportError:
+     completion = None
+
+ logger = logging.getLogger(__name__)
+
+
+ class MeshLlmAgent:
+     """
+     LLM agent proxy with automatic agentic loop.
+
+     Handles the complete flow:
+     1. Format tools for LLM provider (via LiteLLM)
+     2. Call LLM API with tools
+     3. If tool_use: execute via MCP proxies, loop back to LLM
+     4. If final response: parse into output type (Pydantic model)
+     5. Return typed response
+     """
+
+     def __init__(
+         self,
+         config: LLMConfig,
+         filtered_tools: list[dict[str, Any]],
+         output_type: type[BaseModel],
+         tool_proxies: Optional[dict[str, Any]] = None,
+         template_path: Optional[str] = None,
+         context_value: Optional[Any] = None,
+     ):
+         """
+         Initialize MeshLlmAgent proxy.
+
+         Args:
+             config: LLM configuration (provider, model, api_key, etc.)
+             filtered_tools: List of tool metadata from registry (for schema building)
+             output_type: Pydantic BaseModel for response validation
+             tool_proxies: Optional map of function_name -> proxy for tool execution
+             template_path: Optional path to Jinja2 template file for system prompt
+             context_value: Optional context for template rendering (MeshContextModel, dict, or None)
+         """
+         self.config = config
+         self.provider = config.provider
+         self.model = config.model
+         self.api_key = config.api_key
+         self.tools_metadata = filtered_tools  # Tool metadata for schema building
+         self.tool_proxies = tool_proxies or {}  # Proxies for execution
+         self.max_iterations = config.max_iterations
+         self.output_type = output_type
+         self.system_prompt = config.system_prompt  # Public attribute for tests
+         self._iteration_count = 0
+
+         # Template rendering support (Phase 3)
+         self._template_path = template_path
+         self._context_value = context_value
+         self._template: Optional[Any] = None  # Cached template object
+
+         # Load template if path provided
+         if template_path:
+             self._template = self._load_template(template_path)
+
+         # Build tool schemas for LLM (OpenAI format used by LiteLLM)
+         self._tool_schemas = ToolSchemaBuilder.build_schemas(self.tools_metadata)
+
+         # Cache tool calling instructions to prevent XML-style invocations
+         self._cached_tool_instructions = """
+
+ IMPORTANT TOOL CALLING RULES:
+ - You have access to tools that you can call to gather information
+ - Make ONE tool call at a time - each tool call must be separate
+ - NEVER combine multiple tools in a single tool_use block
+ - NEVER use XML-style syntax like <invoke name="tool_name"/>
+ - Each tool must be called using proper JSON tool_use format
+ - After receiving results from a tool, you can make additional tool calls if needed
+ - Once you have gathered all necessary information, provide your final response
+ """
+
+         # Cache JSON schema instructions (output_type never changes after init)
+         # This avoids regenerating the schema on every __call__
+         schema = self.output_type.model_json_schema()
+         schema_str = json.dumps(schema, indent=2)
+         self._cached_json_instructions = (
+             f"\n\nIMPORTANT: You must return your final response as valid JSON matching this schema:\n"
+             f"{schema_str}\n\nReturn ONLY the JSON object, no additional text."
+         )
+
+         logger.debug(
+             f"🤖 MeshLlmAgent initialized: provider={config.provider}, model={config.model}, "
+             f"tools={len(filtered_tools)}, max_iterations={config.max_iterations}"
+         )
+
+     def set_system_prompt(self, prompt: str) -> None:
+         """Override the system prompt at runtime."""
+         self.system_prompt = prompt
+         logger.debug(f"🔧 System prompt updated: {prompt[:50]}...")
+
+     def _load_template(self, template_path: str) -> Any:
+         """
+         Load Jinja2 template from file path.
+
+         Args:
+             template_path: Path to template file (relative or absolute)
+
+         Returns:
+             Jinja2 Template object
+
+         Raises:
+             FileNotFoundError: If template file not found
+             TemplateSyntaxError: If template has syntax errors
+             ImportError: If jinja2 not installed
+         """
+         if Environment is None:
+             raise ImportError(
+                 "jinja2 is required for template rendering. Install with: pip install jinja2"
+             )
+
+         # Resolve template path
+         path = Path(template_path)
+
+         # If relative path, try to resolve it
+         if not path.is_absolute():
+             # Try relative to current working directory first
+             if path.exists():
+                 template_file = path
+             else:
+                 # If not found, raise error with helpful message
+                 raise FileNotFoundError(
+                     f"Template file not found: {template_path}\n"
+                     f"Tried: {path.absolute()}"
+                 )
+         else:
+             template_file = path
+             if not template_file.exists():
+                 raise FileNotFoundError(f"Template file not found: {template_path}")
+
+         # Load template using FileSystemLoader for better error messages
+         template_dir = template_file.parent
+         template_name = template_file.name
+
+         env = Environment(loader=FileSystemLoader(str(template_dir)))
+
+         try:
+             template = env.get_template(template_name)
+             logger.debug(f"📄 Loaded template: {template_path}")
+             return template
+         except Exception as e:
+             # Re-raise with context
+             logger.error(f"❌ Failed to load template {template_path}: {e}")
+             raise
+
+     def _prepare_context(self, context_value: Any) -> dict:
+         """
+         Prepare context for template rendering.
+
+         Converts various context types to dict:
+         - MeshContextModel -> model_dump()
+         - dict -> use directly
+         - None -> empty dict {}
+         - Other types -> TypeError
+
+         Args:
+             context_value: Context value to prepare
+
+         Returns:
+             Dictionary for template rendering
+
+         Raises:
+             TypeError: If context is invalid type
+         """
+         if context_value is None:
+             return {}
+
+         # Check if it's a MeshContextModel (has model_dump method)
+         if hasattr(context_value, "model_dump") and callable(
+             context_value.model_dump
+         ):
+             return context_value.model_dump()
+
+         # Check if it's a dict
+         if isinstance(context_value, dict):
+             return context_value
+
+         # Invalid type
+         raise TypeError(
+             f"Invalid context type: {type(context_value).__name__}. "
+             f"Expected MeshContextModel, dict, or None."
+         )
+
+     def _render_system_prompt(self) -> str:
+         """
+         Render system prompt from template or return literal.
+
+         If template_path was provided in __init__, renders template with context.
+         If system_prompt was set via set_system_prompt(), uses that override.
+         Otherwise, uses config.system_prompt as literal.
+
+         Returns:
+             Rendered system prompt string
+
+         Raises:
+             jinja2.UndefinedError: If required template variable missing
+         """
+         # If runtime override via set_system_prompt(), use that
+         if self.system_prompt and self.system_prompt != self.config.system_prompt:
+             return self.system_prompt
+
+         # If template provided, render it
+         if self._template is not None:
+             context = self._prepare_context(self._context_value)
+             try:
+                 rendered = self._template.render(**context)
+                 logger.debug(
+                     f"🎨 Rendered template with context: {list(context.keys())}"
+                 )
+                 return rendered
+             except Exception as e:
+                 logger.error(f"❌ Template rendering error: {e}")
+                 raise
+
+         # Otherwise, use literal system prompt from config
+         return self.system_prompt or ""
+
+     async def __call__(self, message: str, **kwargs) -> Any:
+         """
+         Execute automatic agentic loop and return typed response.
+
+         Args:
+             message: User message to process
+             **kwargs: Additional arguments passed to LLM
+
+         Returns:
+             Parsed response matching output_type
+
+         Raises:
+             MaxIterationsError: If max iterations exceeded
+             ToolExecutionError: If tool execution fails
+             ValidationError: If response doesn't match output_type schema
+         """
+         self._iteration_count = 0
+
+         # Check if litellm is available
+         if completion is None:
+             raise ImportError(
+                 "litellm is required for MeshLlmAgent. Install with: pip install litellm"
+             )
+
+         # Build initial messages
+         messages = []
+
+         # Render system prompt (from template or literal)
+         base_system_prompt = self._render_system_prompt()
+
+         # Build system prompt with tool calling and JSON schema instructions
+         system_content = base_system_prompt
+
+         # Add tool calling instructions if tools are available
+         if self._tool_schemas:
+             system_content += self._cached_tool_instructions
+
+         # Add JSON schema instructions for final response
+         system_content += self._cached_json_instructions
+
+         # Debug: Log system prompt (truncated for privacy)
+         logger.debug(f"📝 System prompt: {system_content[:200]}...")
+
+         messages.append({"role": "system", "content": system_content})
+         messages.append({"role": "user", "content": message})
+
+         logger.info(f"🚀 Starting agentic loop for message: {message[:100]}...")
+
+         # Agentic loop
+         while self._iteration_count < self.max_iterations:
+             self._iteration_count += 1
+             logger.debug(
+                 f"🔄 Iteration {self._iteration_count}/{self.max_iterations}..."
+             )
+
+             try:
+                 # Call LLM with tools
+                 try:
+                     response = await asyncio.to_thread(
+                         completion,
+                         model=self.model,
+                         messages=messages,
+                         tools=self._tool_schemas if self._tool_schemas else None,
+                         api_key=self.api_key,
+                         **kwargs,
+                     )
+                 except Exception as e:
+                     # Any exception from completion call is an LLM API error
+                     logger.error(f"❌ LLM API error: {e}")
+                     raise LLMAPIError(
+                         provider=self.provider,
+                         model=self.model,
+                         original_error=e,
+                     ) from e
+
+                 # Extract response content
+                 assistant_message = response.choices[0].message
+
+                 # Check if LLM wants to use tools
+                 if (
+                     hasattr(assistant_message, "tool_calls")
+                     and assistant_message.tool_calls
+                 ):
+                     tool_calls = assistant_message.tool_calls
+                     logger.debug(f"🛠️ LLM requested {len(tool_calls)} tool calls")
+
+                     # Add assistant message to history
+                     messages.append(assistant_message.model_dump())
+
+                     # Execute all tool calls
+                     tool_results = await self._execute_tool_calls(tool_calls)
+
+                     # Add tool results to messages
+                     for tool_result in tool_results:
+                         messages.append(tool_result)
+
+                     # Continue loop to get final response
+                     continue
+
+                 # No tool calls - this is the final response
+                 logger.debug("✅ Final response received from LLM")
+                 logger.debug(
+                     f"📥 Raw LLM response: {assistant_message.content[:500]}..."
+                 )
+
+                 # REMOVE_LATER: Debug full LLM response
+                 logger.warning(
+                     f"🔍 REMOVE_LATER: assistant_message type: {type(assistant_message)}"
+                 )
+                 logger.warning(
+                     f"🔍 REMOVE_LATER: assistant_message.content type: {type(assistant_message.content)}"
+                 )
+                 logger.warning(
+                     f"🔍 REMOVE_LATER: assistant_message.content is None: {assistant_message.content is None}"
+                 )
+                 if assistant_message.content:
+                     logger.warning(
+                         f"🔍 REMOVE_LATER: Full LLM response length: {len(assistant_message.content)}"
+                     )
+                     logger.warning(
+                         f"🔍 REMOVE_LATER: Full LLM response: {assistant_message.content!r}"
+                     )
+                 else:
+                     logger.warning(
+                         "🔍 REMOVE_LATER: assistant_message.content is empty or None!"
+                     )
+                     logger.warning(
+                         f"🔍 REMOVE_LATER: Full assistant_message: {assistant_message}"
+                     )
+
+                 return self._parse_response(assistant_message.content)
+
+             except LLMAPIError:
+                 # Re-raise LLM API errors as-is
+                 raise
+             except ToolExecutionError:
+                 # Re-raise tool execution errors as-is
+                 raise
+             except ResponseParseError:
+                 # Re-raise response parse errors as-is
+                 raise
+
+         # Max iterations exceeded
+         logger.error(
+             f"❌ Max iterations ({self.max_iterations}) exceeded without final response"
+         )
+         raise MaxIterationsError(
+             iteration_count=self._iteration_count,
+             max_allowed=self.max_iterations,
+         )
+
+
408
+ async def _execute_tool_calls(self, tool_calls: list[Any]) -> list[dict[str, Any]]:
409
+ """
410
+ Execute tool calls and return results.
411
+
412
+ Delegates to ToolExecutor for actual execution logic.
413
+
414
+ Args:
415
+ tool_calls: List of tool call objects from LLM response
416
+
417
+ Returns:
418
+ List of tool result messages for LLM conversation
419
+
420
+ Raises:
421
+ ToolExecutionError: If tool execution fails
422
+ """
423
+ return await ToolExecutor.execute_calls(tool_calls, self.tool_proxies)
424
+
425
+ def _parse_response(self, content: str) -> Any:
426
+ """
427
+ Parse LLM response into output type.
428
+
429
+ Delegates to ResponseParser for actual parsing logic.
430
+
431
+ Args:
432
+ content: Response content from LLM
433
+
434
+ Returns:
435
+ Parsed Pydantic model instance
436
+
437
+ Raises:
438
+ ResponseParseError: If response doesn't match output_type schema or invalid JSON
439
+ """
440
+ return ResponseParser.parse(content, self.output_type)
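
For context, the sketch below shows how the agentic loop in this new file could be driven end to end. It is illustrative only: the LLMConfig keyword arguments are inferred from the attributes read in __init__, and the tool-metadata shape, the get_weather proxy, and WeatherReport are invented for this example, since the real schemas and proxies come from the registry and injector code elsewhere in this release.

    # Hypothetical end-to-end usage sketch (names marked above are assumptions).
    import asyncio

    from pydantic import BaseModel

    from _mcp_mesh.engine.llm_config import LLMConfig
    from _mcp_mesh.engine.mesh_llm_agent import MeshLlmAgent


    class WeatherReport(BaseModel):
        # output_type: the agent parses the LLM's final JSON into this model
        city: str
        summary: str


    async def get_weather(city: str) -> dict:
        # Stand-in for an MCP tool proxy; real proxies are supplied by the mesh.
        return {"city": city, "summary": "sunny, 22C"}


    async def main() -> None:
        config = LLMConfig(
            provider="openai",
            model="gpt-4o-mini",
            api_key="sk-...",
            max_iterations=5,
            system_prompt="You are a weather assistant.",
        )
        # Assumed metadata shape; ToolSchemaBuilder converts it into
        # OpenAI-style function schemas for LiteLLM.
        filtered_tools = [
            {
                "function_name": "get_weather",
                "description": "Get current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            }
        ]
        agent = MeshLlmAgent(
            config=config,
            filtered_tools=filtered_tools,
            output_type=WeatherReport,
            tool_proxies={"get_weather": get_weather},
        )
        report = await agent("What's the weather in Paris?")
        print(report.city, report.summary)  # typed WeatherReport instance


    if __name__ == "__main__":
        asyncio.run(main())

In normal use these proxies are presumably constructed by the injector machinery added in this release (mesh/decorators.py, mesh_llm_agent_injector.py) rather than by hand. Note that because __call__ wraps the synchronous litellm.completion in asyncio.to_thread, the loop stays awaitable without needing LiteLLM's async client.

The template path added in this file (the "Phase 3" support in __init__ and _render_system_prompt) can be exercised as follows; the template file contents, the SupportContext subclass, and the mesh.types import path are assumptions based on the file list and docstrings:

    # prompts/support_agent.j2 (hypothetical template file):
    #   You are a support agent for {{ tenant }}. Reply in {{ language }}.

    from mesh.types import MeshContextModel  # import path assumed from the file list


    class SupportContext(MeshContextModel):
        tenant: str
        language: str


    templated_agent = MeshLlmAgent(
        config=config,
        filtered_tools=[],
        output_type=WeatherReport,
        template_path="prompts/support_agent.j2",
        context_value=SupportContext(tenant="Acme", language="English"),
    )
    # _render_system_prompt() calls _prepare_context(), which converts the
    # MeshContextModel via model_dump(), then renders the Jinja2 template
    # with those variables as the system prompt.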