universal-mcp 0.1.22rc1__py3-none-any.whl → 0.1.23rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/__init__.py +5 -0
- universal_mcp/applications/application.py +141 -15
- universal_mcp/integrations/integration.py +1 -1
- universal_mcp/servers/server.py +3 -24
- universal_mcp/tools/adapters.py +18 -3
- universal_mcp/tools/func_metadata.py +162 -85
- universal_mcp/tools/tools.py +11 -4
- universal_mcp/utils/agentr.py +29 -1
- universal_mcp/utils/docstring_parser.py +34 -52
- universal_mcp/utils/openapi/api_splitter.py +15 -0
- universal_mcp/utils/openapi/openapi.py +15 -20
- universal_mcp/utils/testing.py +31 -0
- {universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/METADATA +1 -1
- {universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/RECORD +17 -16
- {universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/licenses/LICENSE +0 -0

universal_mcp/applications/__init__.py CHANGED

@@ -30,6 +30,8 @@ sys.path.append(str(UNIVERSAL_MCP_HOME))
 # Name are in the format of "app-name", eg, google-calendar
 # Class name is NameApp, eg, GoogleCalendarApp
 
+app_cache: dict[str, type[BaseApplication]] = {}
+
 
 def _install_or_upgrade_package(package_name: str, repository_path: str):
     """
@@ -71,6 +73,8 @@ def app_from_slug(slug: str):
     Dynamically resolve and return the application class for the given slug.
     Attempts installation from GitHub if the package is not found locally.
     """
+    if slug in app_cache:
+        return app_cache[slug]
    class_name = get_default_class_name(slug)
    module_path = get_default_module_path(slug)
    package_name = get_default_package_name(slug)
@@ -81,6 +85,7 @@ def app_from_slug(slug: str):
         module = importlib.import_module(module_path)
         class_ = getattr(module, class_name)
         logger.debug(f"Loaded class '{class_}' from module '{module_path}'")
+        app_cache[slug] = class_
         return class_
     except ModuleNotFoundError as e:
         raise ModuleNotFoundError(f"Package '{module_path}' not found locally. Please install it first.") from e
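
The new module-level `app_cache` means repeated lookups of the same slug skip the import machinery after the first resolution. A minimal sketch of the effect, assuming the `google-calendar` application package is already installed locally (the slug follows the "app-name" convention noted in the module comments):

```python
from universal_mcp.applications import app_from_slug

# First call resolves the module path, imports it, and caches the class.
GoogleCalendarApp = app_from_slug("google-calendar")

# Second call returns the cached class object without touching importlib again.
assert app_from_slug("google-calendar") is GoogleCalendarApp
```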

universal_mcp/applications/application.py CHANGED

@@ -149,6 +149,30 @@ class APIApplication(BaseApplication):
         )
         return self._client
 
+    def _handle_response(self, response: httpx.Response) -> dict[str, Any]:
+        """
+        Handle API responses by checking for errors and parsing the response appropriately.
+
+        This method:
+        1. Checks for API errors and provides detailed error context including status code and response body
+        2. For successful responses, automatically parses JSON or returns success message
+
+        Args:
+            response: The HTTP response to process
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON data if response contains JSON,
+            otherwise a success message with status code
+
+        Raises:
+            httpx.HTTPStatusError: If the response indicates an error status, with full error details
+        """
+        response.raise_for_status()
+        try:
+            return response.json()
+        except Exception:
+            return {"status": "success", "status_code": response.status_code, "text": response.text}
+
     def _get(self, url: str, params: dict[str, Any] | None = None) -> httpx.Response:
         """
         Make a GET request to the specified URL.
@@ -158,14 +182,13 @@ class APIApplication(BaseApplication):
             params: Optional query parameters
 
         Returns:
-            httpx.Response: The
+            httpx.Response: The raw HTTP response object
 
         Raises:
-            httpx.
+            httpx.HTTPStatusError: If the request fails (when raise_for_status() is called)
         """
         logger.debug(f"Making GET request to {url} with params: {params}")
         response = self.client.get(url, params=params)
-        response.raise_for_status()
         logger.debug(f"GET request successful with status code: {response.status_code}")
         return response
 
@@ -193,10 +216,10 @@ class APIApplication(BaseApplication):
                 Example: {'file_field_name': ('filename.txt', open('file.txt', 'rb'), 'text/plain')}
 
         Returns:
-            httpx.Response: The
+            httpx.Response: The raw HTTP response object
 
         Raises:
-            httpx.
+            httpx.HTTPStatusError: If the request fails (when raise_for_status() is called)
         """
         logger.debug(
             f"Making POST request to {url} with params: {params}, data type: {type(data)}, content_type={content_type}, files: {'yes' if files else 'no'}"
@@ -235,7 +258,6 @@ class APIApplication(BaseApplication):
                 content=data,  # Expect data to be bytes or str
                 params=params,
             )
-        response.raise_for_status()
         logger.debug(f"POST request successful with status code: {response.status_code}")
         return response
 
@@ -263,10 +285,10 @@ class APIApplication(BaseApplication):
                 Example: {'file_field_name': ('filename.txt', open('file.txt', 'rb'), 'text/plain')}
 
         Returns:
-            httpx.Response: The
+            httpx.Response: The raw HTTP response object
 
         Raises:
-            httpx.
+            httpx.HTTPStatusError: If the request fails (when raise_for_status() is called)
         """
         logger.debug(
             f"Making PUT request to {url} with params: {params}, data type: {type(data)}, content_type={content_type}, files: {'yes' if files else 'no'}"
@@ -306,7 +328,6 @@ class APIApplication(BaseApplication):
                 content=data,  # Expect data to be bytes or str
                 params=params,
             )
-        response.raise_for_status()
         logger.debug(f"PUT request successful with status code: {response.status_code}")
         return response
 
@@ -319,14 +340,13 @@ class APIApplication(BaseApplication):
             params: Optional query parameters
 
         Returns:
-            httpx.Response: The
+            httpx.Response: The raw HTTP response object
 
         Raises:
-            httpx.
+            httpx.HTTPStatusError: If the request fails (when raise_for_status() is called)
         """
         logger.debug(f"Making DELETE request to {url} with params: {params}")
         response = self.client.delete(url, params=params, timeout=self.default_timeout)
-        response.raise_for_status()
         logger.debug(f"DELETE request successful with status code: {response.status_code}")
         return response
 
@@ -340,10 +360,10 @@ class APIApplication(BaseApplication):
             params: Optional query parameters
 
         Returns:
-            httpx.Response: The
+            httpx.Response: The raw HTTP response object
 
         Raises:
-            httpx.
+            httpx.HTTPStatusError: If the request fails (when raise_for_status() is called)
         """
         logger.debug(f"Making PATCH request to {url} with params: {params} and data: {data}")
         response = self.client.patch(
@@ -351,10 +371,116 @@ class APIApplication(BaseApplication):
             json=data,
             params=params,
         )
-        response.raise_for_status()
         logger.debug(f"PATCH request successful with status code: {response.status_code}")
         return response
 
+    # New convenience methods that handle responses automatically with enhanced error handling
+    def _get_json(self, url: str, params: dict[str, Any] | None = None) -> dict[str, Any] | str:
+        """
+        Make a GET request and automatically handle the response with enhanced error handling.
+
+        Args:
+            url: The URL to send the request to
+            params: Optional query parameters
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON response if available, otherwise success message
+
+        Raises:
+            httpx.HTTPStatusError: If the request fails with detailed error information including response body
+        """
+        response = self._get(url, params)
+        return self._handle_response(response)
+
+    def _post_json(
+        self,
+        url: str,
+        data: Any,
+        params: dict[str, Any] | None = None,
+        content_type: str = "application/json",
+        files: dict[str, Any] | None = None,
+    ) -> dict[str, Any] | str:
+        """
+        Make a POST request and automatically handle the response with enhanced error handling.
+
+        Args:
+            url: The URL to send the request to
+            data: The data to send
+            params: Optional query parameters
+            content_type: The Content-Type of the request body
+            files: Optional dictionary of files to upload
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON response if available, otherwise success message
+
+        Raises:
+            httpx.HTTPStatusError: If the request fails with detailed error information including response body
+        """
+        response = self._post(url, data, params, content_type, files)
+        return self._handle_response(response)
+
+    def _put_json(
+        self,
+        url: str,
+        data: Any,
+        params: dict[str, Any] | None = None,
+        content_type: str = "application/json",
+        files: dict[str, Any] | None = None,
+    ) -> dict[str, Any] | str:
+        """
+        Make a PUT request and automatically handle the response with enhanced error handling.
+
+        Args:
+            url: The URL to send the request to
+            data: The data to send
+            params: Optional query parameters
+            content_type: The Content-Type of the request body
+            files: Optional dictionary of files to upload
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON response if available, otherwise success message
+
+        Raises:
+            httpx.HTTPStatusError: If the request fails with detailed error information including response body
+        """
+        response = self._put(url, data, params, content_type, files)
+        return self._handle_response(response)
+
+    def _delete_json(self, url: str, params: dict[str, Any] | None = None) -> dict[str, Any] | str:
+        """
+        Make a DELETE request and automatically handle the response with enhanced error handling.
+
+        Args:
+            url: The URL to send the request to
+            params: Optional query parameters
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON response if available, otherwise success message
+
+        Raises:
+            httpx.HTTPStatusError: If the request fails with detailed error information including response body
+        """
+        response = self._delete(url, params)
+        return self._handle_response(response)
+
+    def _patch_json(self, url: str, data: dict[str, Any], params: dict[str, Any] | None = None) -> dict[str, Any] | str:
+        """
+        Make a PATCH request and automatically handle the response with enhanced error handling.
+
+        Args:
+            url: The URL to send the request to
+            data: The data to send in the request body
+            params: Optional query parameters
+
+        Returns:
+            dict[str, Any] | str: Parsed JSON response if available, otherwise success message
+
+        Raises:
+            httpx.HTTPStatusError: If the request fails with detailed error information including response body
+        """
+        response = self._patch(url, data, params)
+        return self._handle_response(response)
+
 
 class GraphQLApplication(BaseApplication):
     """
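
Taken together, `_handle_response` plus the `_*_json` wrappers let application methods return parsed payloads directly instead of carrying their own raise/parse boilerplate. A minimal sketch of the pattern in a hypothetical `APIApplication` subclass (the `ExampleApp` class, `base_url`, and endpoint are illustrative, not part of the package):

```python
from typing import Any

from universal_mcp.applications.application import APIApplication


class ExampleApp(APIApplication):
    base_url = "https://api.example.com"  # illustrative endpoint

    def get_item(self, item_id: str) -> dict[str, Any] | str:
        # _get_json raises httpx.HTTPStatusError on failure and otherwise
        # returns parsed JSON, or a small success stub for non-JSON bodies.
        return self._get_json(f"{self.base_url}/items/{item_id}")

    def create_item(self, payload: dict[str, Any]) -> dict[str, Any] | str:
        return self._post_json(f"{self.base_url}/items", data=payload)
```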

universal_mcp/integrations/integration.py CHANGED

@@ -325,7 +325,7 @@ class AgentRIntegration(Integration):
         ValueError: If no API key is provided or found in environment variables
     """
 
-    def __init__(self, name: str, api_key: str, **kwargs):
+    def __init__(self, name: str, api_key: str | None = None, **kwargs):
         super().__init__(name, **kwargs)
         self.client = AgentrClient(api_key=api_key)
         self._credentials = None
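
With `api_key` now optional, the integration can be constructed without an explicit key and defer to the `AGENTR_API_KEY` environment variable via `AgentrClient` (see the agentr.py change below). A short sketch, assuming the variable is set:

```python
import os

from universal_mcp.integrations.integration import AgentRIntegration

os.environ.setdefault("AGENTR_API_KEY", "sk-example")  # illustrative key, not a real credential

# No api_key argument: AgentrClient falls back to AGENTR_API_KEY,
# and raises ValueError if neither source provides a key.
integration = AgentRIntegration(name="agentr")
```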

universal_mcp/servers/server.py CHANGED

@@ -4,7 +4,6 @@ from typing import Any
 import httpx
 from loguru import logger
 from mcp.server.fastmcp import FastMCP
-from mcp.server.fastmcp.server import MCPTool
 from mcp.types import TextContent
 from pydantic import ValidationError
 
@@ -203,23 +202,13 @@ class AgentRServer(BaseServer):
     """
 
     def __init__(self, config: ServerConfig, **kwargs):
-
+        super().__init__(config, **kwargs)
         self.api_key = config.api_key.get_secret_value() if config.api_key else None
         if not self.api_key:
             raise ValueError("API key is required for AgentR server")
-
         logger.info(f"Initializing AgentR server with API key: {self.api_key}")
         self.client = AgentrClient(api_key=self.api_key)
-
-        self.integration = AgentRIntegration(name="agentr", api_key=self.api_key)
-        # Don't load apps in __init__ for stateless operation
-        self._apps_loaded = False
-
-    def _ensure_apps_loaded(self) -> None:
-        """Ensure apps are loaded, loading them if necessary."""
-        if not self._apps_loaded:
-            self._load_apps()
-            self._apps_loaded = True
+        self._load_apps()
 
     def _fetch_apps(self) -> list[AppConfig]:
         """Fetch available apps from AgentR API with retry logic.
@@ -256,7 +245,7 @@ class AgentRServer(BaseServer):
         """
         try:
             integration = (
-                AgentRIntegration(name=app_config.integration.name, api_key=self.
+                AgentRIntegration(name=app_config.integration.name, api_key=self.api_key)
                 if app_config.integration
                 else None
             )
@@ -292,16 +281,6 @@ class AgentRServer(BaseServer):
             # Don't raise the exception to allow server to start with partial functionality
             logger.warning("Server will start with limited functionality due to app loading failures")
 
-    async def list_tools(self) -> list[MCPTool]:
-        """List available tools, ensuring apps are loaded first."""
-        self._ensure_apps_loaded()
-        return await super().list_tools()
-
-    async def call_tool(self, name: str, arguments: dict) -> list[TextContent]:
-        """Call a tool by name, ensuring apps are loaded first."""
-        self._ensure_apps_loaded()
-        return await super().call_tool(name, arguments)
-
 
 class SingleMCPServer(BaseServer):
     """

universal_mcp/tools/adapters.py CHANGED

@@ -19,11 +19,14 @@ def convert_tool_to_mcp_tool(
 ):
     from mcp.server.fastmcp.server import MCPTool
 
-
+    logger.debug(f"Converting tool '{tool.name}' to MCP format")
+    mcp_tool = MCPTool(
         name=tool.name[:63],
         description=tool.description or "",
         inputSchema=tool.parameters,
     )
+    logger.debug(f"Successfully converted tool '{tool.name}' to MCP format")
+    return mcp_tool
 
 
 def format_to_mcp_result(result: any) -> list[TextContent]:
@@ -35,9 +38,12 @@ def format_to_mcp_result(result: any) -> list[TextContent]:
     Returns:
         List of TextContent objects
     """
+    logger.debug(f"Formatting result to MCP format, type: {type(result)}")
     if isinstance(result, str):
+        logger.debug("Result is string, wrapping in TextContent")
         return [TextContent(type="text", text=result)]
     elif isinstance(result, list) and all(isinstance(item, TextContent) for item in result):
+        logger.debug("Result is already list of TextContent objects")
         return result
     else:
         logger.warning(f"Tool returned unexpected type: {type(result)}. Wrapping in TextContent.")
@@ -60,26 +66,33 @@ def convert_tool_to_langchain_tool(
         a LangChain tool
     """
 
+    logger.debug(f"Converting tool '{tool.name}' to LangChain format")
+
     async def call_tool(
         **arguments: dict[str, any],
     ):
+        logger.debug(f"Executing LangChain tool '{tool.name}' with arguments: {arguments}")
         call_tool_result = await tool.run(arguments)
+        logger.debug(f"Tool '{tool.name}' execution completed")
         return call_tool_result
 
-
+    langchain_tool = StructuredTool(
         name=tool.name,
         description=tool.description or "",
         coroutine=call_tool,
         response_format="content",
         args_schema=tool.parameters,
     )
+    logger.debug(f"Successfully converted tool '{tool.name}' to LangChain format")
+    return langchain_tool
 
 
 def convert_tool_to_openai_tool(
     tool: Tool,
 ):
     """Convert a Tool object to an OpenAI function."""
-
+    logger.debug(f"Converting tool '{tool.name}' to OpenAI format")
+    openai_tool = {
         "type": "function",
         "function": {
             "name": tool.name,
@@ -87,3 +100,5 @@ def convert_tool_to_openai_tool(
             "parameters": tool.parameters,
         },
     }
+    logger.debug(f"Successfully converted tool '{tool.name}' to OpenAI format")
+    return openai_tool

universal_mcp/tools/func_metadata.py CHANGED

@@ -14,6 +14,55 @@ from pydantic.fields import FieldInfo
 from pydantic_core import PydanticUndefined
 
 
+def _map_docstring_type_to_python_type(type_str: str | None) -> Any:
+    """Maps common docstring type strings to Python types."""
+    if not type_str:
+        return Any
+    type_str_lower = type_str.lower()
+    mapping = {
+        "str": str,
+        "string": str,
+        "int": int,
+        "integer": int,
+        "float": float,
+        "number": float,
+        "bool": bool,
+        "boolean": bool,
+        "list": list,
+        "array": list,
+        "dict": dict,
+        "object": dict,
+        "any": Any,
+    }
+    return mapping.get(type_str_lower, Any)
+
+
+def _map_docstring_type_to_schema_type(type_str: str | None) -> str:
+    """Maps common docstring type strings to JSON schema type strings."""
+    # This function might not be strictly needed if Pydantic correctly infers
+    # schema types from Python types, but kept for explicitness if used.
+    # The primary use-case now is for json_schema_extra for untyped Any.
+    if not type_str:
+        return "string"
+    type_str_lower = type_str.lower()
+    mapping = {
+        "str": "string",
+        "string": "string",
+        "int": "integer",
+        "integer": "integer",
+        "float": "number",
+        "number": "number",
+        "bool": "boolean",
+        "boolean": "boolean",
+        "list": "array",
+        "array": "array",
+        "dict": "object",
+        "object": "object",
+        "any": "string",
+    }
+    return mapping.get(type_str_lower, "string")
+
+
 def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:
     def try_eval_type(value: Any, globalns: dict[str, Any], localns: dict[str, Any]) -> tuple[Any, bool]:
         try:
@@ -25,8 +74,6 @@ def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:
         annotation = ForwardRef(annotation)
         annotation, status = try_eval_type(annotation, globalns, globalns)
 
-        # This check and raise could perhaps be skipped, and we (FastMCP) just call
-        # model_rebuild right before using it 🤷
         if status is False:
             raise InvalidSignature(f"Unable to evaluate type annotation {annotation}")
 
@@ -34,7 +81,6 @@ def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:
 
 
 def _get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:
-    """Get function signature while evaluating forward references"""
     signature = inspect.signature(call)
     globalns = getattr(call, "__globals__", {})
     typed_params = [
@@ -51,13 +97,7 @@ def _get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:
 
 
 class ArgModelBase(BaseModel):
-    """A model representing the arguments to a function."""
-
     def model_dump_one_level(self) -> dict[str, Any]:
-        """Return a dict of the model's fields, one level deep.
-
-        That is, sub-models etc are not dumped - they are kept as pydantic models.
-        """
         kwargs: dict[str, Any] = {}
         for field_name in self.__class__.model_fields:
             kwargs[field_name] = getattr(self, field_name)
@@ -70,9 +110,6 @@ class ArgModelBase(BaseModel):
 
 class FuncMetadata(BaseModel):
     arg_model: Annotated[type[ArgModelBase], WithJsonSchema(None)]
-    # We can add things in the future like
-    #  - Maybe some args are excluded from attempting to parse from JSON
-    #  - Maybe some args are special (like context) for dependency injection
 
     async def call_fn_with_arg_validation(
         self,
@@ -82,11 +119,6 @@ class FuncMetadata(BaseModel):
         arguments_to_pass_directly: dict[str, Any] | None,
         context: dict[str, Any] | None = None,
     ) -> Any:
-        """Call the given function with arguments validated and injected.
-
-        Arguments are first attempted to be parsed from JSON, then validated against
-        the argument model, before being passed to the function.
-        """
         arguments_pre_parsed = self.pre_parse_json(arguments_to_validate)
         arguments_parsed_model = self.arg_model.model_validate(arguments_pre_parsed)
         arguments_parsed_dict = arguments_parsed_model.model_dump_one_level()
@@ -102,17 +134,7 @@ class FuncMetadata(BaseModel):
             raise TypeError("fn must be either Callable or Awaitable")
 
     def pre_parse_json(self, data: dict[str, Any]) -> dict[str, Any]:
-
-
-        Return a dict with same keys as input but with values parsed from JSON
-        if appropriate.
-
-        This is to handle cases like `["a", "b", "c"]` being passed in as JSON inside
-        a string rather than an actual list. Claude desktop is prone to this - in fact
-        it seems incapable of NOT doing this. For sub-models, it tends to pass
-        dicts (JSON objects) as JSON strings, which can be pre-parsed here.
-        """
-        new_data = data.copy()  # Shallow copy
+        new_data = data.copy()
         for field_name, _field_info in self.arg_model.model_fields.items():
             if field_name not in data:
                 continue
@@ -120,11 +142,8 @@ class FuncMetadata(BaseModel):
             try:
                 pre_parsed = json.loads(data[field_name])
             except json.JSONDecodeError:
-                continue
+                continue
             if isinstance(pre_parsed, str | int | float):
-                # This is likely that the raw value is e.g. `"hello"` which we
-                # Should really be parsed as '"hello"' in Python - but if we parse
-                # it as JSON it'll turn into just 'hello'. So we skip it.
                 continue
             new_data[field_name] = pre_parsed
         assert new_data.keys() == data.keys()
@@ -139,73 +158,131 @@ class FuncMetadata(BaseModel):
         cls,
         func: Callable[..., Any],
         skip_names: Sequence[str] = (),
-        arg_description: dict[str, str] | None = None,
+        arg_description: dict[str, dict[str, str | None]] | None = None,
     ) -> "FuncMetadata":
-        """Given a function, return metadata including a pydantic model representing its
-        signature.
-
-        The use case for this is
-        ```
-        meta = func_to_pyd(func)
-        validated_args = meta.arg_model.model_validate(some_raw_data_dict)
-        return func(**validated_args.model_dump_one_level())
-        ```
-
-        **critically** it also provides pre-parse helper to attempt to parse things from
-        JSON.
-
-        Args:
-            func: The function to convert to a pydantic model
-            skip_names: A list of parameter names to skip. These will not be included in
-                the model.
-        Returns:
-            A pydantic model representing the function's signature.
-        """
         sig = _get_typed_signature(func)
         params = sig.parameters
         dynamic_pydantic_model_params: dict[str, Any] = {}
         globalns = getattr(func, "__globals__", {})
+        arg_description_map = arg_description or {}
+
         for param in params.values():
             if param.name.startswith("_"):
                 raise InvalidSignature(f"Parameter {param.name} of {func.__name__} cannot start with '_'")
             if param.name in skip_names:
                 continue
-            if
-            … (the rest of the old per-parameter field construction was removed; its text is truncated in this diff view)
+
+            sig_annotation = param.annotation
+            default_val = param.default if param.default is not inspect.Parameter.empty else PydanticUndefined
+
+            param_doc_info = arg_description_map.get(param.name, {})
+            docstring_description = param_doc_info.get("description")
+            docstring_type_str = param_doc_info.get("type_str")
+
+            annotation_for_field_builder: Any
+
+            if sig_annotation is None:
+                annotation_for_field_builder = type(None)
+            elif sig_annotation is inspect.Parameter.empty:
+                py_type_from_doc = _map_docstring_type_to_python_type(docstring_type_str)
+
+                if py_type_from_doc is Any and not docstring_type_str:
+                    schema_type_for_any = _map_docstring_type_to_schema_type(docstring_type_str)
+                    annotation_for_field_builder = Annotated[
+                        Any, Field(json_schema_extra={"type": schema_type_for_any})
+                    ]
+                else:
+                    annotation_for_field_builder = py_type_from_doc
+            else:  # Parameter has a type hint in the signature
+                annotation_for_field_builder = _get_typed_annotation(sig_annotation, globalns)
+
+            field_info = FieldInfo.from_annotated_attribute(annotation_for_field_builder, default_val)
+
+            if field_info.description is None and docstring_description:
+                field_info.description = docstring_description
+
+            if field_info.title is None:
                 field_info.title = param.name
-
-
-
-
-                field_info,
-            )
-            continue
+
+            core_type_for_model = field_info.annotation
+
+            dynamic_pydantic_model_params[param.name] = (core_type_for_model, field_info)
 
         arguments_model = create_model(
             f"{func.__name__}Arguments",
             **dynamic_pydantic_model_params,
             __base__=ArgModelBase,
         )
-
-
+        return FuncMetadata(arg_model=arguments_model)
+
+
+if __name__ == "__main__":
+    import sys
+    from pathlib import Path
+
+    current_file = Path(__file__).resolve()
+    package_source_parent_dir = current_file.parent.parent.parent
+
+    if str(package_source_parent_dir) not in sys.path:
+        sys.path.insert(0, str(package_source_parent_dir))
+        print(f"DEBUG: Added to sys.path: {package_source_parent_dir}")
+
+    from universal_mcp.utils.docstring_parser import parse_docstring
+
+    def post_crm_v_objects_emails_create(self, associations, properties) -> dict[str, Any]:
+        """
+
+        Creates an email object in the CRM using the POST method, allowing for the association of metadata with the email and requiring authentication via OAuth2 or private apps to access the necessary permissions.
+
+        Args:
+            associations (array): associations Example: [{Category': 'HUBSPOT_DEFINED', 'associationTypeId': 2}]}].
+            properties (object): No description provided. Example: "{'ncy': 'monthly'}".
+
+        Returns:
+            dict[str, Any]: successful operation
+
+        Raises:
+            HTTPError: Raised when the API request fails (e.g., non-2XX status code).
+            JSONDecodeError: Raised if the response body cannot be parsed as JSON.
+
+        Tags:
+            Basic
+        """
+        request_body_data = None
+        request_body_data = {"associations": associations, "properties": properties}
+        request_body_data = {k: v for k, v in request_body_data.items() if v is not None}
+        url = f"{self.main_app_client.base_url}/crm/v3/objects/emails"
+        query_params = {}
+        response = self._post(url, data=request_body_data, params=query_params, content_type="application/json")
+        response.raise_for_status()
+        if response.status_code == 204 or not response.content or (not response.text.strip()):
+            return None
+        try:
+            return response.json()
+        except ValueError:
+            return None
+
+    print("--- Testing FuncMetadata with get_weather function ---")
+
+    raw_doc = inspect.getdoc(post_crm_v_objects_emails_create)
+    parsed_doc_info = parse_docstring(raw_doc)
+    arg_descriptions_from_doc = parsed_doc_info.get("args", {})  # Extract just the args part
+
+    print("\n1. Parsed Argument Descriptions from Docstring (for FuncMetadata input):")
+    print(json.dumps(arg_descriptions_from_doc, indent=2))
+
+    # 2. Create FuncMetadata instance
+    # The arg_description parameter expects a dict mapping arg name to its details
+    func_arg_metadata_instance = FuncMetadata.func_metadata(
+        post_crm_v_objects_emails_create, arg_description=arg_descriptions_from_doc
+    )
+
+    print("\n2. FuncMetadata Instance (its __repr__):")
+    print(func_arg_metadata_instance)
+
+    # 3. Get and print the JSON schema for the arguments model
+    parameters_schema = func_arg_metadata_instance.arg_model.model_json_schema()
+    print("\n3. Generated JSON Schema for Parameters (from arg_model.model_json_schema()):")
+    print(json.dumps(parameters_schema, indent=2))
+
+    print("\n--- Test Complete ---")
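
The net effect of the new `_map_docstring_type_*` helpers and the reworked `func_metadata` loop is that parameters without Python type hints can still get sensible schema types from their docstrings. A small sketch (the `add_note` function is illustrative; the commented expectation is inferred from the code above, not from a verified run):

```python
import inspect

from universal_mcp.tools.func_metadata import FuncMetadata
from universal_mcp.utils.docstring_parser import parse_docstring


def add_note(title, pinned):
    """
    Adds a note.

    Args:
        title (str): Title of the note.
        pinned (bool): Whether to pin the note.
    """
    return {"title": title, "pinned": pinned}


args = parse_docstring(inspect.getdoc(add_note))["args"]
meta = FuncMetadata.func_metadata(add_note, arg_description=args)
schema = meta.arg_model.model_json_schema()
# 'title' should come out as a string field and 'pinned' as a boolean,
# even though the signature itself carries no annotations.
print(schema["properties"])
```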

universal_mcp/tools/tools.py CHANGED

@@ -53,11 +53,17 @@ class Tool(BaseModel):
         func_arg_metadata = FuncMetadata.func_metadata(fn, arg_description=parsed_doc["args"])
         parameters = func_arg_metadata.arg_model.model_json_schema()
 
+        simple_args_descriptions: dict[str, str] = {}
+        if parsed_doc.get("args"):
+            for arg_name, arg_details in parsed_doc["args"].items():
+                if isinstance(arg_details, dict):
+                    simple_args_descriptions[arg_name] = arg_details.get("description") or ""
+
         return cls(
             fn=fn,
-            name=func_name
+            name=func_name,
             description=parsed_doc["summary"],
-            args_description=
+            args_description=simple_args_descriptions,
             returns_description=parsed_doc["returns"],
             raises_description=parsed_doc["raises"],
             tags=parsed_doc["tags"],
@@ -79,8 +85,9 @@ class Tool(BaseModel):
         except NotAuthorizedError as e:
             message = f"Not authorized to call tool {self.name}: {e.message}"
             return message
-        except httpx.
-
+        except httpx.HTTPStatusError as e:
+            error_body = e.response.text or "<empty response>"
+            message = f"HTTP {e.response.status_code}: {error_body}"
             raise ToolError(message) from e
         except ValueError as e:
             message = f"Invalid arguments for tool {self.name}: {e}"

universal_mcp/utils/agentr.py CHANGED

@@ -1,3 +1,5 @@
+import os
+
 import httpx
 from loguru import logger
 
@@ -18,7 +20,9 @@ class AgentrClient:
 
     def __init__(self, api_key: str, base_url: str = "https://api.agentr.dev"):
         self.base_url = base_url.rstrip("/")
-        self.api_key = api_key
+        self.api_key = api_key or os.getenv("AGENTR_API_KEY")
+        if not self.api_key:
+            raise ValueError("No API key provided and AGENTR_API_KEY not found in environment variables")
         self.client = httpx.Client(
             base_url=self.base_url, headers={"X-API-KEY": self.api_key}, timeout=30, follow_redirects=True
         )
@@ -78,3 +82,27 @@ class AgentrClient:
         response.raise_for_status()
         data = response.json()
         return [AppConfig.model_validate(app) for app in data]
+
+    def list_all_apps(self) -> list:
+        """List all apps from AgentR API.
+
+        Returns:
+            List of app names
+        """
+        response = self.client.get("/apps/")
+        response.raise_for_status()
+        return response.json()
+
+    def list_actions(self, app_name: str):
+        """List actions for an app.
+
+        Args:
+            app_name (str): Name of the app to list actions for
+
+        Returns:
+            List of action configurations
+        """
+
+        response = self.client.get(f"/apps/{app_name}/actions/")
+        response.raise_for_status()
+        return response.json()
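
A short usage sketch of the new fallback and listing helpers, assuming AGENTR_API_KEY is exported in the environment (the listing calls hit the live https://api.agentr.dev endpoint, and the app name is illustrative):

```python
import os

from universal_mcp.utils.agentr import AgentrClient

os.environ.setdefault("AGENTR_API_KEY", "sk-example")  # illustrative key, not a real credential

# api_key=None is now tolerated at the client level: the constructor falls back
# to AGENTR_API_KEY and raises ValueError if neither source provides a key.
client = AgentrClient(api_key=None)

apps = client.list_all_apps()             # GET /apps/
actions = client.list_actions("github")   # GET /apps/github/actions/
```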

universal_mcp/utils/docstring_parser.py CHANGED

@@ -9,7 +9,8 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
 
     Supports multi-line descriptions for each section. Recognizes common section
     headers like 'Args:', 'Returns:', 'Raises:', 'Tags:', etc. Also attempts
-    to parse key-value pairs within 'Args:' and 'Raises:' sections
+    to parse key-value pairs within 'Args:' and 'Raises:' sections, including
+    type information for arguments if present in the docstring.
 
     Args:
         docstring: The docstring string to parse, or None.
@@ -17,7 +18,9 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
     Returns:
         A dictionary containing the parsed components:
         - 'summary': The first paragraph of the docstring.
-        - 'args': A dictionary mapping argument names to their
+        - 'args': A dictionary mapping argument names to their details,
+          including 'description' and 'type_str' (if found).
+          Example: {"param_name": {"description": "desc...", "type_str": "str"}}
         - 'returns': The description of the return value.
         - 'raises': A dictionary mapping exception types to their descriptions.
         - 'tags': A list of strings found in the 'Tags:' section.
@@ -31,7 +34,7 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
 
     summary: str = ""
     summary_lines: list[str] = []
-    args: dict[str, str] = {}
+    args: dict[str, dict[str, str | None]] = {}
     returns: str = ""
     raises: dict[str, str] = {}
     tags: list[str] = []
@@ -39,17 +42,20 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
     current_section: str | None = None
     current_key: str | None = None
     current_desc_lines: list[str] = []
+    current_arg_type_str: str | None = None
 
-    key_pattern = re.compile(r"^\s*([\w\.]+)\s*(?:\(
+    key_pattern = re.compile(r"^\s*([\w\.]+)\s*(?:\((.*?)\))?:\s*(.*)")
 
     def finalize_current_item():
-
-        nonlocal returns, tags, args, raises
+        nonlocal returns, tags, args, raises, current_arg_type_str
         desc = " ".join(current_desc_lines).strip()
 
         if current_section == "args" and current_key:
-            if desc:
-                args[current_key] =
+            if desc or current_arg_type_str:
+                args[current_key] = {
+                    "description": desc,
+                    "type_str": current_arg_type_str,
+                }
         elif current_section == "raises" and current_key:
             if desc:
                 raises[current_key] = desc
@@ -61,7 +67,6 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
             tags.extend([tag.strip() for tag in desc.split(",") if tag.strip()])
 
     def check_for_section_header(line: str) -> tuple[bool, str | None, str]:
-        """Checks if a line is a recognized section header."""
         stripped_lower = line.strip().lower()
         section_type: str | None = None
         header_content = ""
@@ -79,12 +84,11 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
             parts = re.split(r"[:\s]+", line.strip(), maxsplit=1)
             if len(parts) > 1:
                 header_content = parts[1].strip()
-        elif stripped_lower.startswith(("tags",)):
+        elif stripped_lower.startswith(("tags",)):  # Match "tags" without colon for header content
             section_type = "tags"
             parts = re.split(r"[:\s]+", line.strip(), maxsplit=1)
             if len(parts) > 1:
                 header_content = parts[1].strip()
-
         elif stripped_lower.endswith(":") and stripped_lower[:-1] in (
             "attributes",
             "see also",
@@ -97,7 +101,6 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
             "warnings",
         ):
             section_type = "other"
-
         return section_type is not None, section_type, header_content
 
     in_summary = True
@@ -113,10 +116,8 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
                 in_summary = False
                 summary = " ".join(summary_lines).strip()
                 summary_lines = []
-
                 if not stripped_line:
                     continue
-
             else:
                 summary_lines.append(stripped_line)
                 continue
@@ -133,27 +134,15 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
                 current_section in ["returns", "tags", "other"]
                 and current_desc_lines
                 and original_indentation == 0
-                and stripped_line
+                and stripped_line  # Ensure it's not an empty unindented line (handled by rule 2)
             )
         ):
             should_finalize_previous = True
-        elif (
-            current_section in ["args", "raises"]
-            and current_key is not None
-            or current_section in ["returns", "tags", "other"]
-            and current_desc_lines
-        ):
-            pass
 
         if should_finalize_previous:
             finalize_current_item()
-
-
-            and current_key is not None
-            and not key_pattern.match(line)
-            and (not stripped_line or original_indentation == 0)
-        ):
-            current_key = None
+            current_key = None
+            current_arg_type_str = None
             current_desc_lines = []
 
         if is_new_section_header:
@@ -169,15 +158,14 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
             match = key_pattern.match(line)
             if match:
                 current_key = match.group(1)
-
-
+                current_arg_type_str = match.group(2).strip() if match.group(2) else None
+                current_desc_lines = [match.group(3).strip()]  # Start new description
+            elif current_key is not None:  # Continuation line for an existing key
                 current_desc_lines.append(stripped_line)
-
         elif current_section in ["returns", "tags", "other"]:
             current_desc_lines.append(stripped_line)
 
-    finalize_current_item()
-
+    finalize_current_item()  # Finalize any pending item at the end of the docstring
     if in_summary:
         summary = " ".join(summary_lines).strip()
 
@@ -191,29 +179,23 @@ def parse_docstring(docstring: str | None) -> dict[str, Any]:
 
 
 docstring_example = """
-
-    Returns the job ID immediately.
+    Creates a new product in the CRM product library to manage the collection of goods and services offered by the company.
 
-
-
-
-        params: Optional dictionary of parameters to customize the crawl.
-            See API docs for details.
-        idempotency_key: Optional unique key to prevent duplicate jobs.
+    Args:
+        associations (array): associations
+        properties (object): No description provided. Example: "{'description': 'Onboarding service for data product', 'name': '1 year implementation consultation', 'price': '6000.00', 'hs_sku': '191902', 'hs_cost_of_goods_sold': '600.00', 'hs_recurring_billing_period': 'P24M', 'city': 'Cambridge', 'phone': '(877) 929-0687', 'state': 'Massachusetts', 'domain': 'biglytics.net', 'industry': 'Technology', 'amount': '1500.00', 'dealname': 'Custom data integrations', 'pipeline': 'default', 'closedate': '2019-12-07T16:50:06.678Z', 'dealstage': 'presentationscheduled', 'hubspot_owner_id': '910901', 'email': 'bcooper@biglytics.net', 'company': 'Biglytics', 'website': 'biglytics.net', 'lastname': 'Cooper', 'firstname': 'Bryan', 'subject': 'troubleshoot report', 'hs_pipeline': 'support_pipeline', 'hs_pipeline_stage': 'open', 'hs_ticket_priority': 'HIGH', 'quantity': '2', 'hs_product_id': '191902', 'recurringbillingfrequency': 'monthly'}".
 
-
-
-        or a string containing an error message on failure. This description
-        can also span multiple lines.
+    Returns:
+        dict[str, Any]: successful operation
 
     Raises:
-
-
+        HTTPError: Raised when the API request fails (e.g., non-2XX status code).
+        JSONDecodeError: Raised if the response body cannot be parsed as JSON.
 
-
-
-
-
+    Tags:
+        Basic, Another Tag
+        Yet Another Tag
+    """
 
 if __name__ == "__main__":
     import json
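
Since `args` is now a dict of dicts rather than a dict of strings, downstream callers (such as `Tool.from_function` and `FuncMetadata.func_metadata` above) read `description` and `type_str` per argument. A quick sketch of the new shape; the commented result reflects what the parser code above should produce:

```python
from universal_mcp.utils.docstring_parser import parse_docstring

doc = """
Creates a product.

Args:
    name (str): Product name.
    price (number): Unit price.

Returns:
    dict[str, Any]: successful operation
"""

parsed = parse_docstring(doc)
# Expected shape:
# parsed["args"] == {
#     "name": {"description": "Product name.", "type_str": "str"},
#     "price": {"description": "Unit price.", "type_str": "number"},
# }
print(parsed["args"])
```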

universal_mcp/utils/openapi/api_splitter.py CHANGED

@@ -26,6 +26,21 @@ class APISegmentBase:
 
     def _delete(self, url: str, params: dict = None, **kwargs):
         return self.main_app_client._delete(url, params=params, **kwargs)
+
+    def _get_json(self, url: str, params: dict = None, **kwargs):
+        return self.main_app_client._get_json(url, params=params, **kwargs)
+
+    def _post_json(self, url: str, data: Any = None, files: Any = None, params: dict = None, content_type: str = "application/json", **kwargs):
+        return self.main_app_client._post_json(url, data=data, files=files, params=params, content_type=content_type, **kwargs)
+
+    def _put_json(self, url: str, data: Any = None, files: Any = None, params: dict = None, content_type: str = "application/json", **kwargs):
+        return self.main_app_client._put_json(url, data=data, files=files, params=params, content_type=content_type, **kwargs)
+
+    def _patch_json(self, url: str, data: Any = None, params: dict = None, **kwargs):
+        return self.main_app_client._patch_json(url, data=data, params=params, **kwargs)
+
+    def _delete_json(self, url: str, params: dict = None, **kwargs):
+        return self.main_app_client._delete_json(url, params=params, **kwargs)
 """
 
 

universal_mcp/utils/openapi/openapi.py CHANGED

@@ -149,7 +149,9 @@ def _sanitize_identifier(name: str | None) -> str:
         return ""
 
     # Initial replacements for common non-alphanumeric characters
-    sanitized =
+    sanitized = (
+        name.replace("-", "_").replace(".", "_").replace("[", "_").replace("]", "").replace("$", "_").replace("/", "_")
+    )
 
     # Remove leading underscores, but preserve a single underscore if the name (after initial replace)
     # consisted only of underscores.
@@ -845,8 +847,7 @@ def _generate_method_code(path, method, operation):
 
     raises_section_lines = [
         "Raises:",
-        "
-        "    JSONDecodeError: Raised if the response body cannot be parsed as JSON.",
+        "    HTTPStatusError: Raised when the API request fails with detailed error information including status code and response body.",
     ]
     docstring_parts.append("\n".join(raises_section_lines))
 
@@ -967,42 +968,36 @@ def _generate_method_code(path, method, operation):
     # --- Make HTTP Request ---
     # This section generates the actual HTTP call
     # using the prepared URL, query parameters, request body data, files, and content type.
+    # Use convenience methods that automatically handle responses and errors
 
     if method_lower == "get":
-        body_lines.append("
+        body_lines.append("        return self._get_json(url, params=query_params)")
     elif method_lower == "post":
         if selected_content_type == "multipart/form-data":
             body_lines.append(
-                f"
+                f"        return self._post_json(url, data=request_body_data, files=files_data, params=query_params, content_type='{final_content_type_for_api_call}')"
             )
         else:
             body_lines.append(
-                f"
+                f"        return self._post_json(url, data=request_body_data, params=query_params, content_type='{final_content_type_for_api_call}')"
            )
     elif method_lower == "put":
         if selected_content_type == "multipart/form-data":
             body_lines.append(
-                f"
+                f"        return self._put_json(url, data=request_body_data, files=files_data, params=query_params, content_type='{final_content_type_for_api_call}')"
             )
         else:
             body_lines.append(
-                f"
+                f"        return self._put_json(url, data=request_body_data, params=query_params, content_type='{final_content_type_for_api_call}')"
             )
     elif method_lower == "patch":
-        body_lines.append("
+        body_lines.append("        return self._patch_json(url, data=request_body_data, params=query_params)")
    elif method_lower == "delete":
-        body_lines.append("
+        body_lines.append("        return self._delete_json(url, params=query_params)")
     else:
-        body_lines.append(f"
-
-    #
-    body_lines.append("        response.raise_for_status()")
-    body_lines.append("        if response.status_code == 204 or not response.content or not response.text.strip():")
-    body_lines.append("            return None")
-    body_lines.append("        try:")
-    body_lines.append("            return response.json()")
-    body_lines.append("        except ValueError:")
-    body_lines.append("            return None")
+        body_lines.append(f"        return self._{method_lower}_json(url, data=request_body_data, params=query_params)")
+
+    # No need for manual response handling anymore - convenience methods handle it automatically
 
     # --- Combine Signature, Docstring, and Body for Final Method Code ---
     method_code = signature + formatted_docstring + "\n" + "\n".join(body_lines)
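
For a generated GET operation, the emitted method body therefore shrinks from a raw request plus manual status/JSON handling to a single `_get_json` call. A rough sketch of the shape of the output under that assumption (the `ItemsApp` class, endpoint, and query handling are illustrative, not verbatim generator output):

```python
from typing import Any

import httpx


class ItemsApp:  # stand-in for a generated APIApplication subclass
    base_url = "https://api.example.com"

    def _get(self, url: str, params: dict[str, Any] | None = None) -> httpx.Response: ...
    def _get_json(self, url: str, params: dict[str, Any] | None = None) -> dict[str, Any] | str: ...

    # 0.1.22rc1-style generated body: raw request plus manual status/JSON handling.
    def list_items_old(self, limit=None) -> dict[str, Any]:
        url = f"{self.base_url}/items"
        query_params = {"limit": limit} if limit is not None else {}
        response = self._get(url, params=query_params)
        response.raise_for_status()
        if response.status_code == 204 or not response.content or not response.text.strip():
            return None
        try:
            return response.json()
        except ValueError:
            return None

    # 0.1.23rc1-style generated body: one call, parsing and errors handled centrally.
    def list_items(self, limit=None) -> dict[str, Any]:
        url = f"{self.base_url}/items"
        query_params = {"limit": limit} if limit is not None else {}
        return self._get_json(url, params=query_params)
```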

universal_mcp/utils/testing.py ADDED

@@ -0,0 +1,31 @@
+from loguru import logger
+
+from universal_mcp.tools.tools import Tool
+
+
+def check_application_instance(app_instance, app_name):
+    assert app_instance is not None, f"Application object is None for {app_name}"
+    assert (
+        app_instance.name == app_name
+    ), f"Application instance name '{app_instance.name}' does not match expected name '{app_name}'"
+
+    tools = app_instance.list_tools()
+    logger.info(f"Tools for {app_name}: {len(tools)}")
+    assert len(tools) > 0, f"No tools found for {app_name}"
+
+    tools = [Tool.from_function(tool) for tool in tools]
+    seen_names = set()
+    important_tools = []
+
+    for tool in tools:
+        assert tool.name is not None, f"Tool name is None for a tool in {app_name}"
+        assert (
+            0 < len(tool.name) <= 48
+        ), f"Tool name '{tool.name}' for {app_name} has invalid length (must be between 1 and 47 characters)"
+        assert tool.description is not None, f"Tool description is None for tool '{tool.name}' in {app_name}"
+        # assert 0 < len(tool.description) <= 255, f"Tool description for '{tool.name}' in {app_name} has invalid length (must be between 1 and 255 characters)"
+        assert tool.name not in seen_names, f"Duplicate tool name: '{tool.name}' found for {app_name}"
+        seen_names.add(tool.name)
+        if "important" in tool.tags:
+            important_tools.append(tool.name)
+    assert len(important_tools) > 0, f"No important tools found for {app_name}"
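
The new helper is geared toward pytest-style smoke tests of an application package. A minimal usage sketch; the slug and the `integration=None` construction are assumptions about a locally installed app that exposes at least one tool tagged "important":

```python
import pytest

from universal_mcp.applications import app_from_slug
from universal_mcp.utils.testing import check_application_instance


@pytest.mark.parametrize("slug", ["zenquotes"])  # illustrative slug
def test_application_tools(slug):
    app_class = app_from_slug(slug)
    app = app_class(integration=None)  # assumes the app can be built without credentials
    check_application_instance(app, slug)
```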

{universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp
-Version: 0.1.22rc1
+Version: 0.1.23rc1
 Summary: Universal MCP acts as a middle ware for your API applications. It can store your credentials, authorize, enable disable apps on the fly and much more.
 Author-email: Manoj Bajaj <manojbajaj95@gmail.com>
 License: MIT

{universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/RECORD CHANGED

@@ -6,40 +6,41 @@ universal_mcp/exceptions.py,sha256=-pbeZhpNieJfnSd2-WM80pU8W8mK8VHXcSjky0BHwdk,6
 universal_mcp/logger.py,sha256=VmH_83efpErLEDTJqz55Dp0dioTXfGvMBLZUx5smOLc,2116
 universal_mcp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 universal_mcp/applications/README.md,sha256=eqbizxaTxKH2O1tyIJR2yI0Db5TQxtgPd_vbpWyCa2Y,3527
-universal_mcp/applications/__init__.py,sha256=
-universal_mcp/applications/application.py,sha256
+universal_mcp/applications/__init__.py,sha256=l19_sMs5766VFWU_7O2niamvvvfQOteysqylbqvjjGQ,3500
+universal_mcp/applications/application.py,sha256=-5uHUJORRjRnOwDbqJO4qSJLrFSGRghaUWOKMqhN5vo,22891
 universal_mcp/integrations/README.md,sha256=lTAPXO2nivcBe1q7JT6PRa6v9Ns_ZersQMIdw-nmwEA,996
 universal_mcp/integrations/__init__.py,sha256=X8iEzs02IlXfeafp6GMm-cOkg70QdjnlTRuFo24KEfo,916
-universal_mcp/integrations/integration.py,sha256=
+universal_mcp/integrations/integration.py,sha256=2Wv9g5fJ4cbsTNsp4WcFNKdQCnj6rbBhgNQgMDAQ1Os,13057
 universal_mcp/servers/README.md,sha256=ytFlgp8-LO0oogMrHkMOp8SvFTwgsKgv7XhBVZGNTbM,2284
 universal_mcp/servers/__init__.py,sha256=eBZCsaZjiEv6ZlRRslPKgurQxmpHLQyiXv2fTBygHnM,532
-universal_mcp/servers/server.py,sha256=
+universal_mcp/servers/server.py,sha256=K7sPdCixYgJmQRxOL1icscL7-52sVsghpRX_D_uREu4,12329
 universal_mcp/stores/README.md,sha256=jrPh_ow4ESH4BDGaSafilhOVaN8oQ9IFlFW-j5Z5hLA,2465
 universal_mcp/stores/__init__.py,sha256=quvuwhZnpiSLuojf0NfmBx2xpaCulv3fbKtKaSCEmuM,603
 universal_mcp/stores/store.py,sha256=mxnmOVlDNrr8OKhENWDtCIfK7YeCBQcGdS6I2ogRCsU,6756
 universal_mcp/tools/README.md,sha256=RuxliOFqV1ZEyeBdj3m8UKfkxAsfrxXh-b6V4ZGAk8I,2468
 universal_mcp/tools/__init__.py,sha256=Fatza_R0qYWmNF1WQSfUZZKQFu5qf-16JhZzdmyx3KY,333
-universal_mcp/tools/adapters.py,sha256=
-universal_mcp/tools/func_metadata.py,sha256=
+universal_mcp/tools/adapters.py,sha256=nMoZ9jnv1uKhfq6NmBJ5-a6uwdB_H8RqkdNLIacCRfM,2978
+universal_mcp/tools/func_metadata.py,sha256=7kUWArtUDa2Orr7VGzpwPVfyf2LM3UFA_9arMpl7Zn8,10838
 universal_mcp/tools/manager.py,sha256=ao_ovTyca8HR4uwHdL_lTWNdquxcqRx6FaLA4U1lZvQ,11242
-universal_mcp/tools/tools.py,sha256=
+universal_mcp/tools/tools.py,sha256=8S_KzARYbG9xbyqhZcI4Wk46tXiZcWlcAMgjChXNEI4,3698
 universal_mcp/utils/__init__.py,sha256=8wi4PGWu-SrFjNJ8U7fr2iFJ1ktqlDmSKj1xYd7KSDc,41
-universal_mcp/utils/agentr.py,sha256
+universal_mcp/utils/agentr.py,sha256=-brwvgCZgPjvF7wPXw0QfpEsl1ekXQxmcF07-1AQMR4,3663
 universal_mcp/utils/common.py,sha256=HEZC2Mhilb8DrGXQG2tboAIw1r4veGilGWjfnPF1lyA,888
-universal_mcp/utils/docstring_parser.py,sha256=
+universal_mcp/utils/docstring_parser.py,sha256=efEOE-ME7G5Jbbzpn7pN2xNuyu2M5zfZ1Tqu1lRB0Gk,8392
 universal_mcp/utils/installation.py,sha256=ItOfBFhKOh4DLz237jgAz_Fn0uOMdrKXw0n5BaUZZNs,7286
 universal_mcp/utils/singleton.py,sha256=kolHnbS9yd5C7z-tzaUAD16GgI-thqJXysNi3sZM4No,733
+universal_mcp/utils/testing.py,sha256=0znYkuFi8-WjOdbwrTbNC-UpMqG3EXcGOE0wxlERh_A,1464
 universal_mcp/utils/openapi/__inti__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 universal_mcp/utils/openapi/api_generator.py,sha256=FjtvbnWuI1P8W8wXuKLCirUtsqQ4HI_TuQrhpA4SqTs,4749
-universal_mcp/utils/openapi/api_splitter.py,sha256=
+universal_mcp/utils/openapi/api_splitter.py,sha256=hED34exwKcBtKzkz-3jVWzNeFBNGgjxANQeu1FibuOU,21818
 universal_mcp/utils/openapi/docgen.py,sha256=DNmwlhg_-TRrHa74epyErMTRjV2nutfCQ7seb_Rq5hE,21366
-universal_mcp/utils/openapi/openapi.py,sha256=
+universal_mcp/utils/openapi/openapi.py,sha256=tUD3HNLGAF808AszHLGGKPqpqLT-PZB_8LwagyvsWKQ,50828
 universal_mcp/utils/openapi/preprocessor.py,sha256=PPIM3Uu8DYi3dRKdqi9thr9ufeUgkr2K08ri1BwKpoQ,60835
 universal_mcp/utils/openapi/readme.py,sha256=R2Jp7DUXYNsXPDV6eFTkLiy7MXbSULUj1vHh4O_nB4c,2974
 universal_mcp/utils/templates/README.md.j2,sha256=Mrm181YX-o_-WEfKs01Bi2RJy43rBiq2j6fTtbWgbTA,401
 universal_mcp/utils/templates/api_client.py.j2,sha256=972Im7LNUAq3yZTfwDcgivnb-b8u6_JLKWXwoIwXXXQ,908
-universal_mcp-0.1.
-universal_mcp-0.1.
-universal_mcp-0.1.
-universal_mcp-0.1.
-universal_mcp-0.1.
+universal_mcp-0.1.23rc1.dist-info/METADATA,sha256=qsh7rNBIpaSDFBkw7vmIUpDgM4YYkriK9HYeDOLJpwE,12154
+universal_mcp-0.1.23rc1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp-0.1.23rc1.dist-info/entry_points.txt,sha256=QlBrVKmA2jIM0q-C-3TQMNJTTWOsOFQvgedBq2rZTS8,56
+universal_mcp-0.1.23rc1.dist-info/licenses/LICENSE,sha256=NweDZVPslBAZFzlgByF158b85GR0f5_tLQgq1NS48To,1063
+universal_mcp-0.1.23rc1.dist-info/RECORD,,

{universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/WHEEL: file without changes
{universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/entry_points.txt: file without changes
{universal_mcp-0.1.22rc1.dist-info → universal_mcp-0.1.23rc1.dist-info}/licenses/LICENSE: file without changes