universal-mcp 0.1.22rc4__py3-none-any.whl → 0.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
universal_mcp/config.py CHANGED
@@ -1,7 +1,8 @@
+ import json
  from pathlib import Path
- from typing import Any, Literal
+ from typing import Any, Literal, Self

- from pydantic import BaseModel, Field, SecretStr, field_validator
+ from pydantic import BaseModel, Field, SecretStr, field_validator, model_validator
  from pydantic_settings import BaseSettings, SettingsConfigDict


@@ -19,7 +20,7 @@ class IntegrationConfig(BaseModel):
      """Configuration for API integrations."""

      name: str = Field(..., description="Name of the integration")
-     type: Literal["api_key", "oauth", "agentr", "oauth2"] = Field(
+     type: Literal["api_key", "oauth", "agentr", "oauth2", "basic_auth"] = Field(
          default="api_key", description="Type of authentication to use"
      )
      credentials: dict[str, Any] | None = Field(default=None, description="Integration-specific credentials")
@@ -46,6 +47,9 @@ class ServerConfig(BaseSettings):

      name: str = Field(default="Universal MCP", description="Name of the MCP server")
      description: str = Field(default="Universal MCP", description="Description of the MCP server")
+     base_url: str = Field(
+         default="https://api.agentr.dev", description="Base URL for AgentR API", alias="AGENTR_BASE_URL"
+     )
      api_key: SecretStr | None = Field(default=None, description="API key for authentication", alias="AGENTR_API_KEY")
      type: Literal["local", "agentr"] = Field(default="agentr", description="Type of server deployment")
      transport: Literal["stdio", "sse", "streamable-http"] = Field(
@@ -70,3 +74,58 @@ class ServerConfig(BaseSettings):
          if not 1 <= v <= 65535:
              raise ValueError("Port must be between 1 and 65535")
          return v
+
+     @classmethod
+     def load_json_config(cls, path: str = "local_config.json") -> Self:
+         with open(path) as f:
+             data = json.load(f)
+         return cls.model_validate(data)
+
+
+ class ClientTransportConfig(BaseModel):
+     transport: str | None = None
+     command: str | None = None
+     args: list[str] = []
+     env: dict[str, str] = {}
+     url: str | None = None
+     headers: dict[str, str] = {}
+
+     @model_validator(mode="after")
+     def model_validate(self) -> Self:
+         """
+         Set the transport type based on the presence of command or url.
+         - If command is present, transport is 'stdio'.
+         - Else if url ends with 'mcp', transport is 'streamable_http'.
+         - Else, transport is 'sse'.
+         """
+         if self.command:
+             self.transport = "stdio"
+         elif self.url:
+             # Remove search params from url
+             url = self.url.split("?")[0]
+             if url.rstrip("/").endswith("mcp"):
+                 self.transport = "streamable_http"
+             elif url.rstrip("/").endswith("sse"):
+                 self.transport = "sse"
+             else:
+                 raise ValueError(f"Unknown transport: {self.url}")
+         else:
+             raise ValueError("Either command or url must be provided")
+         return self
+
+
+ class LLMConfig(BaseModel):
+     api_key: str
+     base_url: str
+     model: str
+
+
+ class ClientConfig(BaseSettings):
+     mcpServers: dict[str, ClientTransportConfig]
+     llm: LLMConfig | None = None
+
+     @classmethod
+     def load_json_config(cls, path: str = "servers.json") -> Self:
+         with open(path) as f:
+             data = json.load(f)
+         return cls.model_validate(data)
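
The new ClientTransportConfig validator infers the transport from the shape of each server entry: a command means "stdio", a URL whose path ends in "mcp" means "streamable_http", a URL ending in "sse" means "sse", and anything else is rejected. As a rough illustration only — the file name contents below are hypothetical examples, not shipped defaults:

    # servers.json (hypothetical):
    # {
    #   "mcpServers": {
    #     "local-tool": {"command": "uvx", "args": ["some-mcp-server"]},
    #     "remote-mcp": {"url": "https://example.com/mcp?key=abc"},
    #     "remote-sse": {"url": "https://example.com/sse"}
    #   }
    # }
    from universal_mcp.config import ClientConfig

    config = ClientConfig.load_json_config("servers.json")
    for name, server in config.mcpServers.items():
        # transport is filled in by the model_validator(mode="after") hook:
        # "stdio", "streamable_http", "sse" for the three entries above
        print(name, server.transport)
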
@@ -325,9 +325,9 @@ class AgentRIntegration(Integration):
          ValueError: If no API key is provided or found in environment variables
      """

-     def __init__(self, name: str, api_key: str, **kwargs):
+     def __init__(self, name: str, api_key: str | None = None, base_url: str | None = None, **kwargs):
          super().__init__(name, **kwargs)
-         self.client = AgentrClient(api_key=api_key)
+         self.client = AgentrClient(api_key=api_key, base_url=base_url)
          self._credentials = None

      def set_credentials(self, credentials: dict | None = None):
@@ -206,8 +206,8 @@ class AgentRServer(BaseServer):
          self.api_key = config.api_key.get_secret_value() if config.api_key else None
          if not self.api_key:
              raise ValueError("API key is required for AgentR server")
-         logger.info(f"Initializing AgentR server with API key: {self.api_key}")
-         self.client = AgentrClient(api_key=self.api_key)
+         logger.info(f"Initializing AgentR server with API key: {self.api_key} and base URL: {config.base_url}")
+         self.client = AgentrClient(api_key=self.api_key, base_url=config.base_url)
          self._load_apps()

      def _fetch_apps(self) -> list[AppConfig]:
@@ -245,7 +245,7 @@ class AgentRServer(BaseServer):
          """
          try:
              integration = (
-                 AgentRIntegration(name=app_config.integration.name, api_key=self.api_key)
+                 AgentRIntegration(name=app_config.integration.name, api_key=self.api_key, base_url=self.config.base_url)
                  if app_config.integration
                  else None
              )
@@ -276,10 +276,10 @@ class AgentRServer(BaseServer):
              else:
                  logger.info(f"Successfully loaded {loaded_apps}/{len(app_configs)} apps from AgentR")

-         except Exception:
+         except Exception as e:
              logger.error("Failed to load apps", exc_info=True)
              # Don't raise the exception to allow server to start with partial functionality
-             logger.warning("Server will start with limited functionality due to app loading failures")
+             logger.warning(f"Server will start with limited functionality due to app loading failures: {e}")


  class SingleMCPServer(BaseServer):
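
The net effect of these hunks is that the AgentR base URL is configurable end to end: ServerConfig exposes it (aliased to AGENTR_BASE_URL), and AgentRServer forwards it to both AgentrClient and every AgentRIntegration it constructs. A hedged sketch of the intended flow, assuming the settings model reads the aliased environment variables as the alias names suggest; the key and URL are placeholders:

    import os

    from universal_mcp.config import ServerConfig

    os.environ["AGENTR_API_KEY"] = "sk-example"                        # placeholder
    os.environ["AGENTR_BASE_URL"] = "https://agentr.example.internal"  # placeholder

    config = ServerConfig()       # BaseSettings; default stays https://api.agentr.dev
    print(config.base_url)        # expected: https://agentr.example.internal
    # AgentRServer(config) would then pass config.base_url to AgentrClient
    # and to each AgentRIntegration instead of the hard-coded default.
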
@@ -102,3 +102,19 @@ def convert_tool_to_openai_tool(
      }
      logger.debug(f"Successfully converted tool '{tool.name}' to OpenAI format")
      return openai_tool
+
+
+ def transform_mcp_tool_to_openai_tool(mcp_tool: Tool):
+     """Convert an MCP tool to an OpenAI tool."""
+     from openai.types import FunctionDefinition
+     from openai.types.chat import ChatCompletionToolParam
+
+     return ChatCompletionToolParam(
+         type="function",
+         function=FunctionDefinition(
+             name=mcp_tool.name,
+             description=mcp_tool.description or "",
+             parameters=mcp_tool.inputSchema,
+             strict=False,
+         ),
+     )
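
A usage sketch for the new helper, assuming a list of MCP Tool objects is already available (mcp_tools below is a hypothetical placeholder, and the helper's import path is not shown in this hunk):

    from openai import OpenAI

    # mcp_tools: list[Tool], e.g. collected from the server's tool manager
    openai_tools = [transform_mcp_tool_to_openai_tool(t) for t in mcp_tools]

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "What can you do?"}],
        tools=openai_tools,  # the ChatCompletionToolParam entries built above
    )
    print(response.choices[0].message.tool_calls)
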
@@ -14,6 +14,55 @@ from pydantic.fields import FieldInfo
  from pydantic_core import PydanticUndefined


+ def _map_docstring_type_to_python_type(type_str: str | None) -> Any:
+     """Maps common docstring type strings to Python types."""
+     if not type_str:
+         return Any
+     type_str_lower = type_str.lower()
+     mapping = {
+         "str": str,
+         "string": str,
+         "int": int,
+         "integer": int,
+         "float": float,
+         "number": float,
+         "bool": bool,
+         "boolean": bool,
+         "list": list,
+         "array": list,
+         "dict": dict,
+         "object": dict,
+         "any": Any,
+     }
+     return mapping.get(type_str_lower, Any)
+
+
+ def _map_docstring_type_to_schema_type(type_str: str | None) -> str:
+     """Maps common docstring type strings to JSON schema type strings."""
+     # This function might not be strictly needed if Pydantic correctly infers
+     # schema types from Python types, but kept for explicitness if used.
+     # The primary use-case now is for json_schema_extra for untyped Any.
+     if not type_str:
+         return "string"
+     type_str_lower = type_str.lower()
+     mapping = {
+         "str": "string",
+         "string": "string",
+         "int": "integer",
+         "integer": "integer",
+         "float": "number",
+         "number": "number",
+         "bool": "boolean",
+         "boolean": "boolean",
+         "list": "array",
+         "array": "array",
+         "dict": "object",
+         "object": "object",
+         "any": "string",
+     }
+     return mapping.get(type_str_lower, "string")
+
+
  def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:
      def try_eval_type(value: Any, globalns: dict[str, Any], localns: dict[str, Any]) -> tuple[Any, bool]:
          try:
@@ -25,8 +74,6 @@ def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:
          annotation = ForwardRef(annotation)
      annotation, status = try_eval_type(annotation, globalns, globalns)

-     # This check and raise could perhaps be skipped, and we (FastMCP) just call
-     # model_rebuild right before using it 🤷
      if status is False:
          raise InvalidSignature(f"Unable to evaluate type annotation {annotation}")

@@ -34,7 +81,6 @@ def _get_typed_annotation(annotation: Any, globalns: dict[str, Any]) -> Any:


  def _get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:
-     """Get function signature while evaluating forward references"""
      signature = inspect.signature(call)
      globalns = getattr(call, "__globals__", {})
      typed_params = [
@@ -51,13 +97,7 @@ def _get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:


  class ArgModelBase(BaseModel):
-     """A model representing the arguments to a function."""
-
      def model_dump_one_level(self) -> dict[str, Any]:
-         """Return a dict of the model's fields, one level deep.
-
-         That is, sub-models etc are not dumped - they are kept as pydantic models.
-         """
          kwargs: dict[str, Any] = {}
          for field_name in self.__class__.model_fields:
              kwargs[field_name] = getattr(self, field_name)
@@ -70,9 +110,6 @@ class ArgModelBase(BaseModel):

  class FuncMetadata(BaseModel):
      arg_model: Annotated[type[ArgModelBase], WithJsonSchema(None)]
-     # We can add things in the future like
-     # - Maybe some args are excluded from attempting to parse from JSON
-     # - Maybe some args are special (like context) for dependency injection

      async def call_fn_with_arg_validation(
          self,
@@ -82,11 +119,6 @@ class FuncMetadata(BaseModel):
          arguments_to_pass_directly: dict[str, Any] | None,
          context: dict[str, Any] | None = None,
      ) -> Any:
-         """Call the given function with arguments validated and injected.
-
-         Arguments are first attempted to be parsed from JSON, then validated against
-         the argument model, before being passed to the function.
-         """
          arguments_pre_parsed = self.pre_parse_json(arguments_to_validate)
          arguments_parsed_model = self.arg_model.model_validate(arguments_pre_parsed)
          arguments_parsed_dict = arguments_parsed_model.model_dump_one_level()
@@ -102,17 +134,7 @@ class FuncMetadata(BaseModel):
              raise TypeError("fn must be either Callable or Awaitable")

      def pre_parse_json(self, data: dict[str, Any]) -> dict[str, Any]:
-         """Pre-parse data from JSON.
-
-         Return a dict with same keys as input but with values parsed from JSON
-         if appropriate.
-
-         This is to handle cases like `["a", "b", "c"]` being passed in as JSON inside
-         a string rather than an actual list. Claude desktop is prone to this - in fact
-         it seems incapable of NOT doing this. For sub-models, it tends to pass
-         dicts (JSON objects) as JSON strings, which can be pre-parsed here.
-         """
-         new_data = data.copy() # Shallow copy
+         new_data = data.copy()
          for field_name, _field_info in self.arg_model.model_fields.items():
              if field_name not in data:
                  continue
@@ -120,11 +142,8 @@ class FuncMetadata(BaseModel):
              try:
                  pre_parsed = json.loads(data[field_name])
              except json.JSONDecodeError:
-                 continue # Not JSON - skip
+                 continue
              if isinstance(pre_parsed, str | int | float):
-                 # This is likely that the raw value is e.g. `"hello"` which we
-                 # Should really be parsed as '"hello"' in Python - but if we parse
-                 # it as JSON it'll turn into just 'hello'. So we skip it.
                  continue
              new_data[field_name] = pre_parsed
          assert new_data.keys() == data.keys()
@@ -139,73 +158,131 @@ class FuncMetadata(BaseModel):
          cls,
          func: Callable[..., Any],
          skip_names: Sequence[str] = (),
-         arg_description: dict[str, str] | None = None,
+         arg_description: dict[str, dict[str, str | None]] | None = None,
      ) -> "FuncMetadata":
-         """Given a function, return metadata including a pydantic model representing its
-         signature.
-
-         The use case for this is
-         ```
-         meta = func_to_pyd(func)
-         validated_args = meta.arg_model.model_validate(some_raw_data_dict)
-         return func(**validated_args.model_dump_one_level())
-         ```
-
-         **critically** it also provides pre-parse helper to attempt to parse things from
-         JSON.
-
-         Args:
-             func: The function to convert to a pydantic model
-             skip_names: A list of parameter names to skip. These will not be included in
-                 the model.
-         Returns:
-             A pydantic model representing the function's signature.
-         """
          sig = _get_typed_signature(func)
          params = sig.parameters
          dynamic_pydantic_model_params: dict[str, Any] = {}
          globalns = getattr(func, "__globals__", {})
+         arg_description_map = arg_description or {}
+
          for param in params.values():
              if param.name.startswith("_"):
                  raise InvalidSignature(f"Parameter {param.name} of {func.__name__} cannot start with '_'")
              if param.name in skip_names:
                  continue
-             annotation = param.annotation
-
-             # `x: None` / `x: None = None`
-             if annotation is None:
-                 annotation = Annotated[
-                     None,
-                     Field(default=param.default if param.default is not inspect.Parameter.empty else PydanticUndefined),
-                 ]
-
-             # Untyped field
-             if annotation is inspect.Parameter.empty:
-                 annotation = Annotated[
-                     Any,
-                     Field(),
-                     # 🤷
-                     WithJsonSchema({"title": param.name, "type": "string"}),
-                 ]
-
-             field_info = FieldInfo.from_annotated_attribute(
-                 _get_typed_annotation(annotation, globalns),
-                 param.default if param.default is not inspect.Parameter.empty else PydanticUndefined,
-             )
-             if not field_info.title:
+
+             sig_annotation = param.annotation
+             default_val = param.default if param.default is not inspect.Parameter.empty else PydanticUndefined
+
+             param_doc_info = arg_description_map.get(param.name, {})
+             docstring_description = param_doc_info.get("description")
+             docstring_type_str = param_doc_info.get("type_str")
+
+             annotation_for_field_builder: Any
+
+             if sig_annotation is None:
+                 annotation_for_field_builder = type(None)
+             elif sig_annotation is inspect.Parameter.empty:
+                 py_type_from_doc = _map_docstring_type_to_python_type(docstring_type_str)
+
+                 if py_type_from_doc is Any and not docstring_type_str:
+                     schema_type_for_any = _map_docstring_type_to_schema_type(docstring_type_str)
+                     annotation_for_field_builder = Annotated[
+                         Any, Field(json_schema_extra={"type": schema_type_for_any})
+                     ]
+                 else:
+                     annotation_for_field_builder = py_type_from_doc
+             else:  # Parameter has a type hint in the signature
+                 annotation_for_field_builder = _get_typed_annotation(sig_annotation, globalns)
+
+             field_info = FieldInfo.from_annotated_attribute(annotation_for_field_builder, default_val)
+
+             if field_info.description is None and docstring_description:
+                 field_info.description = docstring_description
+
+             if field_info.title is None:
                  field_info.title = param.name
-             if not field_info.description and arg_description and arg_description.get(param.name):
-                 field_info.description = arg_description.get(param.name)
-             dynamic_pydantic_model_params[param.name] = (
-                 field_info.annotation,
-                 field_info,
-             )
-             continue
+
+             core_type_for_model = field_info.annotation
+
+             dynamic_pydantic_model_params[param.name] = (core_type_for_model, field_info)

          arguments_model = create_model(
              f"{func.__name__}Arguments",
              **dynamic_pydantic_model_params,
              __base__=ArgModelBase,
          )
-         resp = FuncMetadata(arg_model=arguments_model)
-         return resp
+         return FuncMetadata(arg_model=arguments_model)
+
+
+ if __name__ == "__main__":
+     import sys
+     from pathlib import Path
+
+     current_file = Path(__file__).resolve()
+     package_source_parent_dir = current_file.parent.parent.parent
+
+     if str(package_source_parent_dir) not in sys.path:
+         sys.path.insert(0, str(package_source_parent_dir))
+         print(f"DEBUG: Added to sys.path: {package_source_parent_dir}")
+
+     from universal_mcp.utils.docstring_parser import parse_docstring
+
+     def post_crm_v_objects_emails_create(self, associations, properties) -> dict[str, Any]:
+         """
+
+         Creates an email object in the CRM using the POST method, allowing for the association of metadata with the email and requiring authentication via OAuth2 or private apps to access the necessary permissions.
+
+         Args:
+             associations (array): associations Example: [{Category': 'HUBSPOT_DEFINED', 'associationTypeId': 2}]}].
+             properties (object): No description provided. Example: "{'ncy': 'monthly'}".
+
+         Returns:
+             dict[str, Any]: successful operation
+
+         Raises:
+             HTTPError: Raised when the API request fails (e.g., non-2XX status code).
+             JSONDecodeError: Raised if the response body cannot be parsed as JSON.
+
+         Tags:
+             Basic
+         """
+         request_body_data = None
+         request_body_data = {"associations": associations, "properties": properties}
+         request_body_data = {k: v for k, v in request_body_data.items() if v is not None}
+         url = f"{self.main_app_client.base_url}/crm/v3/objects/emails"
+         query_params = {}
+         response = self._post(url, data=request_body_data, params=query_params, content_type="application/json")
+         response.raise_for_status()
+         if response.status_code == 204 or not response.content or (not response.text.strip()):
+             return None
+         try:
+             return response.json()
+         except ValueError:
+             return None
+
+     print("--- Testing FuncMetadata with get_weather function ---")
+
+     raw_doc = inspect.getdoc(post_crm_v_objects_emails_create)
+     parsed_doc_info = parse_docstring(raw_doc)
+     arg_descriptions_from_doc = parsed_doc_info.get("args", {}) # Extract just the args part
+
+     print("\n1. Parsed Argument Descriptions from Docstring (for FuncMetadata input):")
+     print(json.dumps(arg_descriptions_from_doc, indent=2))
+
+     # 2. Create FuncMetadata instance
+     # The arg_description parameter expects a dict mapping arg name to its details
+     func_arg_metadata_instance = FuncMetadata.func_metadata(
+         post_crm_v_objects_emails_create, arg_description=arg_descriptions_from_doc
+     )
+
+     print("\n2. FuncMetadata Instance (its __repr__):")
+     print(func_arg_metadata_instance)
+
+     # 3. Get and print the JSON schema for the arguments model
+     parameters_schema = func_arg_metadata_instance.arg_model.model_json_schema()
+     print("\n3. Generated JSON Schema for Parameters (from arg_model.model_json_schema()):")
+     print(json.dumps(parameters_schema, indent=2))
+
+     print("\n--- Test Complete ---")
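
For reference, the arg_description parameter changed shape: it used to map an argument name to a plain description string and now maps it to a dict with "description" and "type_str" keys, which is what the __main__ demo above feeds in from parse_docstring. A minimal hand-written sketch with a hypothetical function, bypassing the docstring parser and assuming FuncMetadata is imported from the module shown above:

    def send_message(channel, text="hello"):
        ...

    # One entry per argument; type_str drives the schema type for untyped params.
    arg_description = {
        "channel": {"description": "Target channel ID", "type_str": "str"},
        "text": {"description": "Message body", "type_str": "string"},
    }

    meta = FuncMetadata.func_metadata(send_message, arg_description=arg_description)
    print(meta.arg_model.model_json_schema())
    # Both parameters come out as JSON-schema "string" fields carrying the
    # docstring descriptions.
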
@@ -53,11 +53,17 @@ class Tool(BaseModel):
          func_arg_metadata = FuncMetadata.func_metadata(fn, arg_description=parsed_doc["args"])
          parameters = func_arg_metadata.arg_model.model_json_schema()

+         simple_args_descriptions: dict[str, str] = {}
+         if parsed_doc.get("args"):
+             for arg_name, arg_details in parsed_doc["args"].items():
+                 if isinstance(arg_details, dict):
+                     simple_args_descriptions[arg_name] = arg_details.get("description") or ""
+
          return cls(
              fn=fn,
              name=func_name,
              description=parsed_doc["summary"],
-             args_description=parsed_doc["args"],
+             args_description=simple_args_descriptions,
              returns_description=parsed_doc["returns"],
              raises_description=parsed_doc["raises"],
              tags=parsed_doc["tags"],
@@ -79,8 +85,9 @@ class Tool(BaseModel):
          except NotAuthorizedError as e:
              message = f"Not authorized to call tool {self.name}: {e.message}"
              return message
-         except httpx.HTTPError as e:
-             message = f"HTTP error calling tool {self.name}: {str(e)}"
+         except httpx.HTTPStatusError as e:
+             error_body = e.response.text or "<empty response>"
+             message = f"HTTP {e.response.status_code}: {error_body}"
              raise ToolError(message) from e
          except ValueError as e:
              message = f"Invalid arguments for tool {self.name}: {e}"
@@ -1,3 +1,5 @@
+ import os
+
  import httpx
  from loguru import logger

@@ -18,7 +20,9 @@ class AgentrClient:

      def __init__(self, api_key: str, base_url: str = "https://api.agentr.dev"):
          self.base_url = base_url.rstrip("/")
-         self.api_key = api_key
+         self.api_key = api_key or os.getenv("AGENTR_API_KEY")
+         if not self.api_key:
+             raise ValueError("No API key provided and AGENTR_API_KEY not found in environment variables")
          self.client = httpx.Client(
              base_url=self.base_url, headers={"X-API-KEY": self.api_key}, timeout=30, follow_redirects=True
          )
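
The constructor now falls back to the AGENTR_API_KEY environment variable when no key is passed and fails fast with a ValueError otherwise (note the annotation still reads api_key: str even though None is now handled). A short sketch with a placeholder key; the AgentrClient import path is omitted since it is not shown in this diff:

    import os

    os.environ["AGENTR_API_KEY"] = "sk-example"  # placeholder value

    client = AgentrClient(api_key=None)   # falls back to the environment variable
    assert client.api_key == "sk-example"

    del os.environ["AGENTR_API_KEY"]
    # AgentrClient(api_key=None) would now raise:
    # ValueError: No API key provided and AGENTR_API_KEY not found in environment variables
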