mcp-mesh 0.8.0b9__py3-none-any.whl → 0.9.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

_mcp_mesh/engine/provider_handlers/gemini_handler.py
@@ -1,15 +1,22 @@
 """
 Gemini/Google provider handler.
 
-Optimized for Gemini models (Gemini 2.0 Flash, Gemini 1.5 Pro, etc.)
+Optimized for Gemini models (Gemini 3 Flash, Gemini 3 Pro, Gemini 2.0 Flash, etc.)
 using Google's best practices for tool calling and structured output.
 
 Features:
-- Native structured output via response_format (similar to OpenAI)
+- Mixed structured output strategy based on tool presence:
+  - STRICT mode (response_format) when NO tools are present
+  - HINT mode (prompt-based JSON hints) when tools ARE present
 - Native function calling support
 - Support for Gemini 2.x and 3.x models
 - Large context windows (up to 2M tokens)
 
+Note:
+- Gemini 3 + response_format + tools causes non-deterministic infinite tool loops,
+  so we avoid response_format when tools are present and use HINT mode instead.
+- STRICT mode (response_format) is only used for tool-free requests.
+
 Reference:
 - https://docs.litellm.ai/docs/providers/gemini
 - https://ai.google.dev/gemini-api/docs
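
The STRICT/HINT split documented above reduces to a small decision rule. A minimal sketch of that rule in isolation (illustrative only: `select_gemini_strategy` is not a name in this package, and the shipped handler folds the logic into `prepare_request` and `format_system_prompt`):

    from pydantic import BaseModel

    def select_gemini_strategy(output_type: type, has_tools: bool) -> str:
        """Illustrative STRICT/HINT/TEXT selection per the module note."""
        if output_type is str:
            return "text"  # plain text, no structured output needed
        if isinstance(output_type, type) and issubclass(output_type, BaseModel):
            # response_format + tools can trigger infinite tool loops on
            # Gemini 3, so strict mode is reserved for tool-free requests
            return "hint" if has_tools else "strict"
        return "strict"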
@@ -24,8 +31,13 @@ from .base_provider_handler import (
     BASE_TOOL_INSTRUCTIONS,
     BaseProviderHandler,
     make_schema_strict,
+    sanitize_schema_for_structured_output,
 )
 
+OUTPUT_MODE_TEXT = "text"
+OUTPUT_MODE_STRICT = "strict"
+OUTPUT_MODE_HINT = "hint"
+
 logger = logging.getLogger(__name__)
 
 
@@ -34,24 +46,23 @@ class GeminiHandler(BaseProviderHandler):
     Provider handler for Google Gemini models.
 
     Gemini Characteristics:
-    - Native structured output via response_format parameter (LiteLLM translates)
+    - Mixed structured output strategy:
+      - STRICT mode (response_format) when NO tools are present
+      - HINT mode (prompt-based JSON hints) when tools ARE present
+    - Gemini 3 + response_format + tools causes non-deterministic infinite tool loops,
+      so we avoid combining them.
     - Native function calling support
     - Large context windows (1M-2M tokens)
     - Multimodal support (text, images, video, audio)
     - Works well with concise, focused prompts
 
-    Key Similarities with OpenAI:
-    - Uses response_format for structured output (via LiteLLM translation)
-    - Native function calling format
-    - Similar schema enforcement requirements
-
     Supported Models (via LiteLLM):
+    - gemini/gemini-3-flash-preview (reasoning support)
+    - gemini/gemini-3-pro-preview (advanced reasoning)
     - gemini/gemini-2.0-flash (fast, efficient)
     - gemini/gemini-2.0-flash-lite (fastest, most efficient)
     - gemini/gemini-1.5-pro (high capability)
     - gemini/gemini-1.5-flash (balanced)
-    - gemini/gemini-3-flash-preview (reasoning support)
-    - gemini/gemini-3-pro-preview (advanced reasoning)
 
     Reference:
     https://docs.litellm.ai/docs/providers/gemini
@@ -60,11 +71,29 @@ class GeminiHandler(BaseProviderHandler):
     def __init__(self):
         """Initialize Gemini handler."""
         super().__init__(vendor="gemini")
+        # Store output schema for use in format_system_prompt (set by apply_structured_output)
+        self._pending_output_schema: dict[str, Any] | None = None
+        self._pending_output_type_name: str | None = None
+
+    def determine_output_mode(self, output_type, override_mode=None):
+        """Determine output mode for Gemini.
+
+        Gemini 3 supports native response_format with tools.
+        Uses STRICT mode (response_format) for all schema types.
+        TEXT mode for string return types.
+        """
+        if override_mode:
+            return override_mode
+        if output_type is str:
+            return OUTPUT_MODE_TEXT
+        if isinstance(output_type, type) and issubclass(output_type, BaseModel):
+            return OUTPUT_MODE_STRICT
+        return OUTPUT_MODE_STRICT
 
     def prepare_request(
         self,
         messages: list[dict[str, Any]],
-        tools: Optional[list[dict[str, Any]]],
+        tools: list[dict[str, Any]] | None,
         output_type: type,
         **kwargs: Any,
     ) -> dict[str, Any]:
@@ -72,8 +101,8 @@ class GeminiHandler(BaseProviderHandler):
         Prepare request parameters for Gemini API via LiteLLM.
 
         Gemini Strategy:
-        - Use response_format parameter for structured JSON output
-        - LiteLLM handles translation to Gemini's native format
+        - Use native response_format with strict schema (Gemini 3+)
+        - Store schema for HINT fallback in format_system_prompt
         - Skip structured output for str return types (text mode)
 
         Args:
@@ -98,46 +127,60 @@ class GeminiHandler(BaseProviderHandler):
 
         # Skip structured output for str return type (text mode)
         if output_type is str:
+            self._pending_output_schema = None
+            self._pending_output_type_name = None
             return request_params
 
-        # Only add response_format for Pydantic models
-        if not (isinstance(output_type, type) and issubclass(output_type, BaseModel)):
-            return request_params
-
-        # Add response_format for structured output
-        # LiteLLM translates this to Gemini's native format
-        schema = output_type.model_json_schema()
-
-        # Transform schema for strict mode compliance
-        # Gemini requires additionalProperties: false and all properties in required
-        schema = make_schema_strict(schema, add_all_required=True)
-
-        # Gemini structured output format (via LiteLLM)
-        request_params["response_format"] = {
-            "type": "json_schema",
-            "json_schema": {
-                "name": output_type.__name__,
-                "schema": schema,
-                "strict": True,  # Enforce schema compliance
-            },
-        }
+        # Only store schema for Pydantic models
+        if isinstance(output_type, type) and issubclass(output_type, BaseModel):
+            schema = output_type.model_json_schema()
+            schema = sanitize_schema_for_structured_output(schema)
+
+            # Store for HINT mode in format_system_prompt (always needed as fallback)
+            self._pending_output_schema = schema
+            self._pending_output_type_name = output_type.__name__
+
+            # Only use response_format when NO tools present
+            # Gemini 3 + response_format + tools causes non-deterministic infinite tool loops
+            if not tools:
+                strict_schema = make_schema_strict(schema, add_all_required=True)
+                request_params["response_format"] = {
+                    "type": "json_schema",
+                    "json_schema": {
+                        "name": output_type.__name__,
+                        "schema": strict_schema,
+                        "strict": True,
+                    },
+                }
+                logger.debug(
+                    "Gemini: Using response_format with strict schema for '%s' (no tools)",
+                    output_type.__name__,
+                )
+            else:
+                logger.debug(
+                    "Gemini: Using HINT mode for '%s' (tools present, response_format skipped)",
+                    output_type.__name__,
+                )
+        else:
+            self._pending_output_schema = None
+            self._pending_output_type_name = None
 
         return request_params
 
     def format_system_prompt(
         self,
         base_prompt: str,
-        tool_schemas: Optional[list[dict[str, Any]]],
+        tool_schemas: list[dict[str, Any]] | None,
         output_type: type,
     ) -> str:
         """
-        Format system prompt for Gemini (concise approach).
+        Format system prompt for Gemini with structured output support.
 
         Gemini Strategy:
         1. Use base prompt as-is
         2. Add tool calling instructions if tools present
-        3. Minimal JSON instructions (response_format handles structure)
-        4. Keep prompt concise - Gemini works well with clear, direct prompts
+        3. STRICT mode: Brief note (response_format handles enforcement)
+        4. HINT mode fallback: Detailed JSON output instructions with example structure
 
         Args:
             base_prompt: Base system prompt
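
For a tool-free request, the `response_format` entry assembled above takes the shape below. A sketch with a hypothetical `Answer` model; the exact schema contents depend on what `sanitize_schema_for_structured_output` and `make_schema_strict` do to the Pydantic output:

    from pydantic import BaseModel

    class Answer(BaseModel):  # hypothetical output type, not from the package
        summary: str
        confidence: float

    schema = Answer.model_json_schema()  # sanitized and made strict in the handler
    response_format = {
        "type": "json_schema",
        "json_schema": {"name": "Answer", "schema": schema, "strict": True},
    }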
@@ -153,13 +196,77 @@ class GeminiHandler(BaseProviderHandler):
         if tool_schemas:
             system_content += BASE_TOOL_INSTRUCTIONS
 
-        # Skip JSON note for str return type (text mode)
-        if output_type is str:
+        # Get the output schema (may have been set by apply_structured_output or prepare_request)
+        # Check pending schema FIRST - it may be set even when output_type is str (delegate path)
+        output_schema = self._pending_output_schema
+        output_type_name = self._pending_output_type_name
+
+        # Fall back to output_type if no pending schema AND output_type is Pydantic model
+        if output_schema is None:
+            if output_type is str:
+                # No schema and str return type - skip JSON instructions
+                return system_content
+            elif isinstance(output_type, type) and issubclass(output_type, BaseModel):
+                output_schema = sanitize_schema_for_structured_output(
+                    output_type.model_json_schema()
+                )
+                output_type_name = output_type.__name__
+
+        # Determine output mode
+        determined_mode = self.determine_output_mode(output_type)
+
+        # STRICT mode: Brief note (response_format handles enforcement)
+        # But only when no tools - with tools we use HINT mode
+        if determined_mode == OUTPUT_MODE_STRICT and not tool_schemas:
+            system_content += f"\n\nYour final response will be structured as JSON matching the {output_type_name} format."
             return system_content
 
-        # Add brief JSON note (response_format handles enforcement)
-        if isinstance(output_type, type) and issubclass(output_type, BaseModel):
-            system_content += f"\n\nYour final response will be structured as JSON matching the {output_type.__name__} format."
+        # HINT mode fallback: Detailed JSON instructions
+        if output_schema is not None:
+            system_content += "\n\nOUTPUT FORMAT:\n"
+
+            # Add DECISION GUIDE if tools are available
+            if tool_schemas:
+                system_content += "DECISION GUIDE:\n"
+                system_content += "- If your answer requires real-time data (weather, calculations, etc.), call the appropriate tool FIRST, then format your response as JSON.\n"
+                system_content += "- If your answer is general knowledge (like facts, explanations, definitions), directly return your response as JSON WITHOUT calling tools.\n\n"
+
+            system_content += "Your FINAL response must be ONLY valid JSON (no markdown, no code blocks) with this exact structure:\n"
+            system_content += "{\n"
+
+            # Build example showing expected structure with descriptions
+            properties = output_schema.get("properties", {})
+            prop_items = list(properties.items())
+            for i, (prop_name, prop_schema) in enumerate(prop_items):
+                prop_type = prop_schema.get("type", "string")
+                prop_desc = prop_schema.get("description", "")
+
+                # Show example value based on type
+                if prop_type == "string":
+                    example_value = f'"<your {prop_name} here>"'
+                elif prop_type in ("number", "integer"):
+                    example_value = "0"
+                elif prop_type == "array":
+                    example_value = '["item1", "item2"]'
+                elif prop_type == "boolean":
+                    example_value = "true"
+                elif prop_type == "object":
+                    example_value = "{}"
+                else:
+                    example_value = "..."
+
+                # Add comma for all but last property
+                comma = "," if i < len(prop_items) - 1 else ""
+                # Include description as comment if available
+                if prop_desc:
+                    system_content += (
+                        f'  "{prop_name}": {example_value}{comma} // {prop_desc}\n'
+                    )
+                else:
+                    system_content += f'  "{prop_name}": {example_value}{comma}\n'
+
+            system_content += "}\n\n"
+            system_content += "Return ONLY the JSON object with actual values. Do not include the schema definition, markdown formatting, or code blocks."
 
         return system_content
 
@@ -171,11 +278,74 @@ class GeminiHandler(BaseProviderHandler):
             Capability flags for Gemini
         """
         return {
-            "native_tool_calling": True,  # Gemini has native function calling
-            "structured_output": True,  # Supports structured output via response_format
-            "streaming": True,  # Supports streaming
-            "vision": True,  # Gemini supports multimodal (images, video, audio)
+            "native_tool_calling": True,
+            "structured_output": True,  # Via native response_format (Gemini 3+)
+            "streaming": True,
+            "vision": True,
             "json_mode": True,  # Native JSON mode via response_format
-            "large_context": True,  # Up to 2M tokens context window
+            "large_context": True,
         }
 
+    def apply_structured_output(
+        self,
+        output_schema: dict[str, Any],
+        output_type_name: str | None,
+        model_params: dict[str, Any],
+    ) -> dict[str, Any]:
+        """
+        Apply Gemini-specific structured output for mesh delegation.
+
+        Uses HINT mode (prompt injection) because mesh delegation always involves
+        tools, and Gemini 3 + response_format + tools causes infinite tool loops.
+        """
+        sanitized_schema = sanitize_schema_for_structured_output(output_schema)
+
+        # Store for format_system_prompt
+        self._pending_output_schema = sanitized_schema
+        self._pending_output_type_name = output_type_name
+
+        # Inject HINT instructions into system messages
+        # (mesh delegation always has tools, so we can't use response_format)
+        messages = model_params.get("messages", [])
+        for msg in messages:
+            if msg.get("role") == "system":
+                base_content = msg.get("content", "")
+                # Build hint instructions
+                hint_text = "\n\nOUTPUT FORMAT:\n"
+                hint_text += "Your FINAL response must be ONLY valid JSON (no markdown, no code blocks) with this exact structure:\n"
+                hint_text += "{\n"
+                properties = sanitized_schema.get("properties", {})
+                prop_items = list(properties.items())
+                for i, (prop_name, prop_schema) in enumerate(prop_items):
+                    prop_type = prop_schema.get("type", "string")
+                    if prop_type == "string":
+                        example_value = f'"<your {prop_name} here>"'
+                    elif prop_type in ("number", "integer"):
+                        example_value = "0"
+                    elif prop_type == "array":
+                        example_value = '["item1", "item2"]'
+                    elif prop_type == "boolean":
+                        example_value = "true"
+                    elif prop_type == "object":
+                        example_value = "{}"
+                    else:
+                        example_value = "..."
+                    comma = "," if i < len(prop_items) - 1 else ""
+                    prop_desc = prop_schema.get("description", "")
+                    if prop_desc:
+                        hint_text += (
+                            f'  "{prop_name}": {example_value}{comma} // {prop_desc}\n'
+                        )
+                    else:
+                        hint_text += f'  "{prop_name}": {example_value}{comma}\n'
+                hint_text += "}\n\n"
+                hint_text += "Return ONLY the JSON object with actual values. Do not include the schema definition, markdown formatting, or code blocks."
+
+                msg["content"] = base_content + hint_text
+                break
+
+        logger.info(
+            "Gemini hint mode for '%s' (mesh delegation, schema in prompt)",
+            output_type_name or "Response",
+        )
+        return model_params
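
A sketch of the delegation-path call. The handler class and module path are taken from the diff and RECORD; the call site and arguments are illustrative:

    from _mcp_mesh.engine.provider_handlers.gemini_handler import GeminiHandler

    handler = GeminiHandler()
    model_params = {
        "model": "gemini/gemini-3-pro-preview",
        "messages": [{"role": "system", "content": "You are a helpful agent."}],
        "tools": [],  # populated by the mesh in practice; their presence is why HINT is used
    }
    model_params = handler.apply_structured_output(
        output_schema={"properties": {"summary": {"type": "string"}}},
        output_type_name="Answer",
        model_params=model_params,
    )
    # messages[0]["content"] now ends with the OUTPUT FORMAT hint block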

_mcp_mesh/engine/provider_handlers/generic_handler.py
@@ -160,3 +160,34 @@ TOOL CALLING RULES:
             "vision": False,  # Conservative - not all models support vision
             "json_mode": False,  # Conservative - use prompt-based JSON instead
         }
+
+    def apply_structured_output(
+        self,
+        output_schema: dict[str, Any],
+        output_type_name: Optional[str],
+        model_params: dict[str, Any],
+    ) -> dict[str, Any]:
+        """
+        Apply structured output for generic vendors.
+
+        Generic strategy: Don't use response_format since not all vendors support it.
+        The consumer should rely on prompt-based instructions instead.
+
+        Args:
+            output_schema: JSON schema dict from consumer
+            output_type_name: Name of the output type
+            model_params: Current model parameters dict
+
+        Returns:
+            Unmodified model_params (no response_format added)
+        """
+        # Don't add response_format - generic vendors may not support it
+        # The consumer's system prompt should include JSON instructions
+        import logging
+
+        logger = logging.getLogger(__name__)
+        logger.debug(
+            f"⚠️ Generic handler: skipping response_format for '{output_type_name}' "
+            f"(vendor '{self.vendor}' may not support structured output)"
+        )
+        return model_params

_mcp_mesh/engine/provider_handlers/openai_handler.py
@@ -13,6 +13,7 @@ from .base_provider_handler import (
     BASE_TOOL_INSTRUCTIONS,
     BaseProviderHandler,
     make_schema_strict,
+    sanitize_schema_for_structured_output,
 )
 
 
@@ -96,6 +97,9 @@ class OpenAIHandler(BaseProviderHandler):
         # rather than relying on prompt instructions alone
         schema = output_type.model_json_schema()
 
+        # Sanitize schema first to remove unsupported validation keywords (minimum, maximum, etc.)
+        schema = sanitize_schema_for_structured_output(schema)
+
         # Transform schema for OpenAI strict mode
         # OpenAI requires additionalProperties: false and all properties in required
         schema = make_schema_strict(schema, add_all_required=True)
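
The sanitize step matters because Pydantic emits JSON Schema validation keywords for constrained fields, and OpenAI strict mode rejects some of them. A sketch assuming `sanitize_schema_for_structured_output` strips keywords such as `minimum`/`maximum` (the exact keyword list lives in base_provider_handler.py and is not shown in this diff):

    from pydantic import BaseModel, Field

    class Score(BaseModel):  # hypothetical constrained model
        value: int = Field(ge=0, le=100)

    schema = Score.model_json_schema()
    # schema["properties"]["value"] contains {"minimum": 0, "maximum": 100, ...};
    # sanitization is assumed to drop such keywords before make_schema_strict
    # adds additionalProperties=False and the full "required" list.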
@@ -176,4 +180,3 @@ class OpenAIHandler(BaseProviderHandler):
         "vision": True,  # GPT-4V and later support vision
         "json_mode": True,  # Has dedicated JSON mode via response_format
     }
-

_mcp_mesh/pipeline/api_heartbeat/rust_api_heartbeat.py
@@ -104,9 +104,10 @@ def _build_api_agent_spec(context: dict[str, Any], service_id: str = None) -> An
     # Build dependency specs
     deps = []
     for dep_cap in dependencies:
+        # Tags must be serialized to JSON string (Rust core expects string, not list)
         dep_spec = core.DependencySpec(
             capability=dep_cap,
-            tags=[],
+            tags=json.dumps([]),
            version=None,
        )
        deps.append(dep_spec)
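
The serialized value is ordinary JSON text; for API services, which declare no tag constraints, it is the two-character string shown below (the Rust core is assumed to parse it back on its side):

    import json

    assert json.dumps([]) == "[]"   # API services: no tag constraints
    json.dumps(["cache"])           # '["cache"]', a single required tag (hypothetical)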
@@ -136,6 +137,7 @@ def _build_api_agent_spec(context: dict[str, Any], service_id: str = None) -> An
         http_port=http_port,
         http_host=http_host,
         namespace=namespace,
+        agent_type="api",  # API services only consume capabilities, not provide them
         tools=tools if tools else None,
         llm_agents=None,  # API services don't have LLM agents
         heartbeat_interval=heartbeat_interval,
@@ -272,6 +274,8 @@ async def _handle_api_dependency_change(
     )
     if not current_service_id:
         # Use config resolver for consistent env var handling
+        from ...shared.config_resolver import get_config_value
+
         current_service_id = get_config_value("MCP_MESH_AGENT_ID")
 
     is_self_dependency = (

_mcp_mesh/pipeline/mcp_heartbeat/rust_heartbeat.py
@@ -117,9 +117,12 @@ def _build_agent_spec(context: dict[str, Any]) -> Any:
     # Build dependency specs
     deps = []
     for dep_info in tool_metadata.get("dependencies", []):
+        # Serialize tags to JSON to support nested arrays for OR alternatives
+        # e.g., ["addition", ["python", "typescript"]] -> addition AND (python OR typescript)
+        tags_json = json.dumps(dep_info.get("tags", []))
         dep_spec = core.DependencySpec(
             capability=dep_info.get("capability", ""),
-            tags=dep_info.get("tags", []),
+            tags=tags_json,
             version=dep_info.get("version"),
         )
         deps.append(dep_spec)
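
The nested-array encoding gives a compact AND-of-ORs: every top-level entry must match, and an inner list matches if any member does. A sketch of the matching rule the comment describes (the real matcher lives in the Rust core; this Python version is only illustrative):

    import json

    tags = ["addition", ["python", "typescript"]]
    payload = json.dumps(tags)  # '["addition", ["python", "typescript"]]'

    def matches(required: list, provider_tags: set[str]) -> bool:
        # top level is AND; inner lists are OR alternatives
        return all(
            any(alt in provider_tags for alt in req) if isinstance(req, list)
            else req in provider_tags
            for req in required
        )

    assert matches(json.loads(payload), {"addition", "python"})
    assert not matches(json.loads(payload), {"addition"})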
@@ -198,6 +201,21 @@ def _build_agent_spec(context: dict[str, Any]) -> Any:
             )
             break
 
+        # Extract kwargs (non-standard fields like vendor)
+        # These are spread as top-level keys in metadata by @mesh.tool(**kwargs)
+        standard_fields = {
+            "capability",
+            "tags",
+            "version",
+            "description",
+            "dependencies",
+            "input_schema",
+        }
+        kwargs_data = {
+            k: v for k, v in tool_metadata.items() if k not in standard_fields
+        }
+        kwargs_json = json.dumps(kwargs_data) if kwargs_data else None
+
         tool_spec = core.ToolSpec(
             function_name=tool_name,
             capability=tool_metadata.get("capability", tool_name),
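
Concretely, any extra keyword passed to the decorator survives as `kwargs_json`. A sketch with hypothetical metadata (the `vendor` key is the example the comment itself names):

    import json

    # e.g. @mesh.tool(capability="chat", tags=["llm"], vendor="gemini")
    tool_metadata = {"capability": "chat", "tags": ["llm"], "vendor": "gemini"}
    standard_fields = {"capability", "tags", "version", "description",
                       "dependencies", "input_schema"}
    kwargs_data = {k: v for k, v in tool_metadata.items() if k not in standard_fields}
    assert json.dumps(kwargs_data) == '{"vendor": "gemini"}'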
@@ -208,6 +226,7 @@ def _build_agent_spec(context: dict[str, Any]) -> Any:
             input_schema=input_schema_json,
             llm_filter=llm_filter_json,
             llm_provider=llm_provider_json,
+            kwargs=kwargs_json,
         )
         tools.append(tool_spec)
         logger.info(
@@ -269,6 +288,14 @@ async def _handle_mesh_event(event: Any, context: dict[str, Any]) -> None:
     if event_type == "agent_registered":
         logger.info(f"Agent registered with ID: {event.agent_id}")
 
+        # Initialize direct LiteLLM agents that don't need mesh delegation
+        # These agents have provider="string" and filter=None, so all info is
+        # available at decorator time - no need to wait for registry response
+        from ...engine.dependency_injector import get_global_injector
+
+        injector = get_global_injector()
+        injector.initialize_direct_llm_agents()
+
     elif event_type == "registration_failed":
         logger.error(f"Agent registration failed: {event.error}")
 
@@ -548,6 +575,7 @@ async def _handle_llm_tools_update(
                 "input_schema": (
                     json.loads(tool.input_schema) if tool.input_schema else None
                 ),
+                "description": tool.description if tool.description else "",
             }
             tool_list.append(tool_info)
 
@@ -595,6 +623,7 @@ async def _handle_llm_provider_update(
             "endpoint": provider_info.endpoint,
             "name": provider_info.function_name,  # OpenAPI contract uses "name"
             "model": provider_info.model,
+            "vendor": provider_info.vendor,  # For handler selection
         }
     }
     injector.process_llm_providers(llm_providers)

_mcp_mesh/tracing/trace_context_helper.py
@@ -102,7 +102,9 @@ class TraceContextHelper:
         trace_id = request.headers.get("X-Trace-ID")
         parent_span = request.headers.get("X-Parent-Span")
 
-        # Try extracting from JSON-RPC body as fallback
+        # Try extracting from JSON-RPC body as fallback (for TypeScript agents that
+        # inject trace context into arguments since FastMCP doesn't expose HTTP headers)
+        # Keys use underscore prefix to match injection convention: _trace_id, _parent_span
         if not trace_id:
             try:
                 body = await request.body()
@@ -110,8 +112,8 @@ class TraceContextHelper:
                     payload = json.loads(body.decode("utf-8"))
                     if payload.get("method") == "tools/call":
                         arguments = payload.get("params", {}).get("arguments", {})
-                        trace_id = arguments.get("trace_id")
-                        parent_span = arguments.get("parent_span")
+                        trace_id = arguments.get("_trace_id")
+                        parent_span = arguments.get("_parent_span")
             except Exception:
                 pass
 
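
A TypeScript agent that cannot set HTTP headers ships the trace context inside the call arguments; the fallback above recovers it. A sketch with placeholder IDs:

    import json

    body = json.dumps({
        "jsonrpc": "2.0",
        "method": "tools/call",
        "params": {"arguments": {
            "city": "Paris",            # ordinary tool argument
            "_trace_id": "abc123",      # injected by the calling agent
            "_parent_span": "def456",
        }},
    })
    payload = json.loads(body)
    arguments = payload.get("params", {}).get("arguments", {})
    assert arguments.get("_trace_id") == "abc123"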

mcp_mesh-0.8.0b9.dist-info/METADATA → mcp_mesh-0.9.0b1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-mesh
-Version: 0.8.0b9
+Version: 0.9.0b1
 Summary: Kubernetes-native platform for distributed MCP applications
 Project-URL: Homepage, https://github.com/dhyansraj/mcp-mesh
 Project-URL: Documentation, https://github.com/dhyansraj/mcp-mesh/tree/main/docs
@@ -18,6 +18,8 @@ Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: System :: Distributed Computing
@@ -30,7 +32,7 @@ Requires-Dist: fastmcp<3.0.0,>=2.8.0
 Requires-Dist: httpx<1.0.0,>=0.25.0
 Requires-Dist: jinja2>=3.1.0
 Requires-Dist: litellm>=1.30.0
-Requires-Dist: mcp-mesh-core>=0.8.0b9
+Requires-Dist: mcp-mesh-core>=0.9.0b1
 Requires-Dist: mcp<2.0.0,>=1.9.0
 Requires-Dist: prometheus-client<1.0.0,>=0.19.0
 Requires-Dist: pydantic<3.0.0,>=2.4.0

mcp_mesh-0.8.0b9.dist-info/RECORD → mcp_mesh-0.9.0b1.dist-info/RECORD
@@ -1,16 +1,16 @@
-_mcp_mesh/__init__.py,sha256=Pi7UIWRma8F9zTC-gcmeT5ApFCxpepQYRYMvWRVfDtE,2721
+_mcp_mesh/__init__.py,sha256=XUX8d7g4R88-074M-kF4oZqBAM6d2f6E3r4sZc_j7n0,2721
 _mcp_mesh/reload.py,sha256=5Yll9n0bqxM7pmTjfAaKWg-WT_Vi0YTh0_UNWbCNCIQ,6217
 _mcp_mesh/reload_runner.py,sha256=SgQKzzO2yHfSUBq8s3SpAnovWA0rveimVNaxeLCEo_0,1310
 _mcp_mesh/engine/__init__.py,sha256=U_6Kw3vA_3RiNK0Oln5c5C7WvA9lSONV22wWzfxYHNw,2975
 _mcp_mesh/engine/async_mcp_client.py,sha256=Sz-rXTkb1Mng_f0SpLqLuOdPJ8vZjv3DFy0i8yYOqYk,8792
 _mcp_mesh/engine/base_injector.py,sha256=qzRLZqFP2VvEFagVovkpdldvDmm3VwPHm6tHwV58a2k,5648
 _mcp_mesh/engine/decorator_registry.py,sha256=cch2QdQ6bKjHKEGi1XWp1YcLLO3uI2YlxwWBO7Np65E,28229
-_mcp_mesh/engine/dependency_injector.py,sha256=kjRUA4Lyj9zfYJ67NuFjx9YSdZEc3QtxTTpy3ww8YTg,31584
+_mcp_mesh/engine/dependency_injector.py,sha256=p_W7pJNnGNGvixiv14CsjRV5If7Ll1VdV9BEU9cde1g,31923
 _mcp_mesh/engine/http_wrapper.py,sha256=Simd6IEsLO2FXQOuf1WEx57SBN6DSr5RzphXnk0asHM,24152
 _mcp_mesh/engine/llm_config.py,sha256=95bOsGWro5E1JGq7oZtEYhVdrzcIJqjht_r5vEdJVz4,2049
 _mcp_mesh/engine/llm_errors.py,sha256=h7BiI14u-jL8vtvBfFbFDDrN7gIw8PQjXIl5AP1SBuA,3276
-_mcp_mesh/engine/mesh_llm_agent.py,sha256=sVh7lPnvixDVJ-p1ONzbeakiEzhsl0HmdmrLPZA2FzQ,34237
-_mcp_mesh/engine/mesh_llm_agent_injector.py,sha256=CHfyXWBjRPdsWtV5LTr1TsVPqKu6H0I0xZ2e62MB1p4,28820
+_mcp_mesh/engine/mesh_llm_agent.py,sha256=tKUjDqxoBxzsnWQm1T3RR0S1duHKgG6MSFiHfMjcFJU,35677
+_mcp_mesh/engine/mesh_llm_agent_injector.py,sha256=L3ptyni_lOclS6gRT7z5Paw5lLcrjbCRQnMCW8OMnBU,37899
 _mcp_mesh/engine/response_parser.py,sha256=g3VNoFJotaLrOAS0pL_OTCrv9t9XQe9Iiz1plsm28bQ,10280
 _mcp_mesh/engine/self_dependency_proxy.py,sha256=OkKt0-B_ADnJlWtHiHItoZCBZ7Su0iz2unEPFfXvrs4,3302
 _mcp_mesh/engine/session_aware_client.py,sha256=QejKag5zYNos5BVffQvNXFMECHFMLNOv78By4e_JzQE,10589
@@ -19,17 +19,17 @@ _mcp_mesh/engine/signature_analyzer.py,sha256=bG9HEsDtJlzeS2ueypLpcp7qD4_zso4DH1
 _mcp_mesh/engine/tool_executor.py,sha256=Bf_9d02EEY9_yHm1p1-5YZ4rY6MPxn4SVpI6-3sm1uo,5456
 _mcp_mesh/engine/tool_schema_builder.py,sha256=SQCxQIrSfdLu9-dLqiFurQLK7dhl0dc0xa0ibaxU-iE,3644
 _mcp_mesh/engine/unified_mcp_proxy.py,sha256=SZXlgdeNzlEGynwLLmmTi5R1-OrBaw4P8Izqhfn-zmI,37846
-_mcp_mesh/engine/provider_handlers/__init__.py,sha256=8hEc4CheKfXWU3ny4YDktxNxLCWxgfMtyDW9CblPOvs,888
-_mcp_mesh/engine/provider_handlers/base_provider_handler.py,sha256=Lb0U6gAEseU7Ix1eeV4T0WP1ClmeXUz87Nx_iplUYSI,8077
-_mcp_mesh/engine/provider_handlers/claude_handler.py,sha256=iYAmllL5rTWFFTAjbR62Ra9eMWNZjA72a02tppxjgOQ,14343
-_mcp_mesh/engine/provider_handlers/gemini_handler.py,sha256=SrJuhlaoPPHGRQH4jFXR1C_M0FtKeFcvJHERkpIPRNA,6055
-_mcp_mesh/engine/provider_handlers/generic_handler.py,sha256=rAE3QzoB7vz4zbBPPDJTI0MOuJrfhrOdXSyzsob4uVI,5489
-_mcp_mesh/engine/provider_handlers/openai_handler.py,sha256=1WhiHqf72N3584tJDVaBonDmu64tPuuiP12vzPrUNO0,6204
+_mcp_mesh/engine/provider_handlers/__init__.py,sha256=07aK9bdo2-XlzkmdIqTOJQtZbycERBJNQN90nJg6AZg,934
+_mcp_mesh/engine/provider_handlers/base_provider_handler.py,sha256=ExvRwfb2QVMbebWyuKQqJH_V-pkJ6R-3gnVGebBgu9o,13870
+_mcp_mesh/engine/provider_handlers/claude_handler.py,sha256=t23IygAIqeEJuIL16PFdL6COHrCcHb8XrXYiVnX8Vpg,15455
+_mcp_mesh/engine/provider_handlers/gemini_handler.py,sha256=cfCkXN6VJWxzwRUvklwA8_Le1LYYJpFqpvxBRjIwGUU,14338
+_mcp_mesh/engine/provider_handlers/generic_handler.py,sha256=SBPWpEf0u-797WBZXzMcr78JTSnxPmgBsqvn7zIqJVw,6617
+_mcp_mesh/engine/provider_handlers/openai_handler.py,sha256=fIxALmtdE5JL80c59jHt7btQvYddXNsTSU1xZ9xJ45g,6417
 _mcp_mesh/engine/provider_handlers/provider_handler_registry.py,sha256=klBZW8iX6Jk88TASAUyP8oPo4wvLB3DaTNFHeYvGLiA,5708
 _mcp_mesh/pipeline/__init__.py,sha256=MgPwpwbiD62ND4HXKKNGcnreDk-TvPmQOs5WmjtHQ3M,1263
 _mcp_mesh/pipeline/api_heartbeat/__init__.py,sha256=qGjEgxbGJFSl9Qm3bwu3X5yizAMbN4WpFtIUekDSFuU,690
 _mcp_mesh/pipeline/api_heartbeat/api_lifespan_integration.py,sha256=h0mTmLyPlGDqomSHpbW7S-AZNz1Tyvg1kpy9aeWkQsU,3879
-_mcp_mesh/pipeline/api_heartbeat/rust_api_heartbeat.py,sha256=vrld873jS2zCfJCndldx2XLDjxp_bRY7qq9CNBqZ8wI,15579
+_mcp_mesh/pipeline/api_heartbeat/rust_api_heartbeat.py,sha256=7Dv3lGTn2n2WrDpXoWqxUlqi6NwjPRH17-GVbsUckgE,15843
 _mcp_mesh/pipeline/api_startup/__init__.py,sha256=eivolkSKot2bJTWP2BV8-RKRT1Zm7SGQYuEUiTxusOQ,577
 _mcp_mesh/pipeline/api_startup/api_pipeline.py,sha256=I9-Q0o2py5oAHZO2DJOeTD1uZo1-Dpn258k5Tr0dv9o,2474
 _mcp_mesh/pipeline/api_startup/api_server_setup.py,sha256=72oCMkCzRfxYrE5sfFJbr57BYJwRSyKxBMISTOHmKyc,14919
@@ -38,7 +38,7 @@ _mcp_mesh/pipeline/api_startup/middleware_integration.py,sha256=J7Ux_nJ1VsMqVzl5
 _mcp_mesh/pipeline/api_startup/route_collection.py,sha256=WPr4hRPLIWnNIJCoRHZ141ph9tAa_-Pm_j2TiCuWS4k,2002
 _mcp_mesh/pipeline/api_startup/route_integration.py,sha256=qq1AVaWna-CWEXyehyDL3EyeYKgo5aMtei8uBNdvkZ8,12448
 _mcp_mesh/pipeline/mcp_heartbeat/__init__.py,sha256=mhDcSquoHkhRItqgbM8iFfAKC2m7qMW_0smqtUgSl-w,389
-_mcp_mesh/pipeline/mcp_heartbeat/rust_heartbeat.py,sha256=Y0VWtgmfM_PINHoA5abjUAlyXrMdTkfr4c05NMWtA9M,26392
+_mcp_mesh/pipeline/mcp_heartbeat/rust_heartbeat.py,sha256=9vKkmYE4YGr2vkXKKWExggIZbwMjkmSWSks0a4qw4cU,27699
 _mcp_mesh/pipeline/mcp_startup/__init__.py,sha256=qy960dnAoHLXMcL_y_rcro9Km2AoCVzC7_CxMwao564,1166
 _mcp_mesh/pipeline/mcp_startup/configuration.py,sha256=OnumIPRVBTne2OEU2VWLZovLKvWcNF9iJVQtlVwuim0,2805
 _mcp_mesh/pipeline/mcp_startup/decorator_collection.py,sha256=RHC6MHtfP9aP0hZ-IJjISZu72e0Pml3LU0qr7dc284w,2294
@@ -72,14 +72,14 @@ _mcp_mesh/tracing/context.py,sha256=2ozqKEYfx4Qxj64DnbwoVIbMkhNLbaV8BNWtkzAPA7I,
 _mcp_mesh/tracing/execution_tracer.py,sha256=dvM6QSbxk5s5rZZsCgVNaXWrr2J4_5I6XAfI1aM0sSA,10268
 _mcp_mesh/tracing/fastapi_tracing_middleware.py,sha256=FXjhA1A1Krk-ngyuOZPc1Ic4Llggv4Wide9OuPmkwCY,6959
 _mcp_mesh/tracing/redis_metadata_publisher.py,sha256=DeFrMt0ZX7k6393dH-xoRS2V5apPR-k80X8ZjrKBHMU,2890
-_mcp_mesh/tracing/trace_context_helper.py,sha256=A0UipvDExePaX-E-4SAp4M8n8uwed9PMo8gibXt1v_Q,6513
+_mcp_mesh/tracing/trace_context_helper.py,sha256=Iy9FDXCzNjcXOHJ7iT9T98DC3brHQC2UYMLVTcgmZjs,6724
 _mcp_mesh/tracing/utils.py,sha256=GWwfvab0tYGr9QAe_zgZjZxgDKTTs0p5Mf8w6WJeWC0,4486
 _mcp_mesh/utils/fastmcp_schema_extractor.py,sha256=fttO1EABbf4GWKjE9V5DimwbhzGY9DbfGWQ2ak4SRnE,17264
 mesh/__init__.py,sha256=avMnUHkNAK7VgON2OhXkrFB290gr1HErghmTZpOXr-U,4207
-mesh/decorators.py,sha256=3h3tEhKWrlxBDYjP7vVM7iMPy7nx1oVSWVqzugC6WPM,67580
-mesh/helpers.py,sha256=1Y7V6aQvpV8BKfEeeKfjwPJ5g9FjMCzSNifs3se1jkA,12935
+mesh/decorators.py,sha256=Xru9NoOolmdm-awGuuQkUgBb-s5bq9UF4p5QdVidAvI,71374
+mesh/helpers.py,sha256=LM4eLwsBW6IlHaT5sPZkk39EN-CFK8mqCaxfgrWrsLw,15145
 mesh/types.py,sha256=vr0CKyPbP6lHgxj9kh_GMSLo3xkJ66PFPV_opfRb1H4,17772
-mcp_mesh-0.8.0b9.dist-info/METADATA,sha256=-M8ClhDxH5Y4v4lCg882IS6ebAzgNXTvG-nQrGG_Mzw,5040
-mcp_mesh-0.8.0b9.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-mcp_mesh-0.8.0b9.dist-info/licenses/LICENSE,sha256=_EBQHRQThv9FPOLc5eFOUdeeRO0mYwChC7cx60dM1tM,1078
-mcp_mesh-0.8.0b9.dist-info/RECORD,,
+mcp_mesh-0.9.0b1.dist-info/METADATA,sha256=f1jTrT-YTEZ8HiCGzuzyh_XJ3IEZWvFfYGXa1ZgM_T0,5142
+mcp_mesh-0.9.0b1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+mcp_mesh-0.9.0b1.dist-info/licenses/LICENSE,sha256=_EBQHRQThv9FPOLc5eFOUdeeRO0mYwChC7cx60dM1tM,1078
+mcp_mesh-0.9.0b1.dist-info/RECORD,,