universal-mcp 0.1.0__tar.gz → 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/.gitignore +9 -1
  2. universal_mcp-0.1.1/CONTRIBUTING.md +78 -0
  3. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/PKG-INFO +26 -1
  4. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/README.md +18 -0
  5. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/pyproject.toml +9 -3
  6. universal_mcp-0.1.1/src/playground/__init__.py +3 -0
  7. universal_mcp-0.1.1/src/playground/agents/react.py +50 -0
  8. universal_mcp-0.1.1/src/playground/client.py +288 -0
  9. universal_mcp-0.1.1/src/playground/memory/__init__.py +14 -0
  10. universal_mcp-0.1.1/src/playground/memory/sqlite.py +9 -0
  11. universal_mcp-0.1.1/src/playground/schema.py +177 -0
  12. universal_mcp-0.1.1/src/playground/service.py +235 -0
  13. universal_mcp-0.1.1/src/playground/settings.py +23 -0
  14. universal_mcp-0.1.1/src/playground/streamlit.py +306 -0
  15. universal_mcp-0.1.1/src/playground/utils.py +76 -0
  16. universal_mcp-0.1.1/src/tests/conftest.py +1 -0
  17. universal_mcp-0.1.1/src/tests/test_applications.py +25 -0
  18. universal_mcp-0.1.1/src/tests/test_zenquotes.py +25 -0
  19. universal_mcp-0.1.1/src/universal_mcp/cli.py +83 -0
  20. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/servers/server.py +0 -1
  21. universal_mcp-0.1.1/src/universal_mcp/utils/installation.py +89 -0
  22. universal_mcp-0.1.1/uv.lock +1923 -0
  23. universal_mcp-0.1.0/src/universal_mcp/cli.py +0 -111
  24. universal_mcp-0.1.0/uv.lock +0 -510
  25. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/.python-version +0 -0
  26. /universal_mcp-0.1.0/src/universal_mcp/applications/agentr.py → /universal_mcp-0.1.1/src/tests/__init__.py +0 -0
  27. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/__init__.py +0 -0
  28. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/__init__.py +0 -0
  29. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/application.py +0 -0
  30. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/github/app.py +0 -0
  31. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/google_calendar/app.py +0 -0
  32. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/google_mail/app.py +0 -0
  33. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/reddit/app.py +0 -0
  34. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/resend/app.py +0 -0
  35. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/tavily/app.py +0 -0
  36. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/applications/zenquotes/app.py +0 -0
  37. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/config.py +0 -0
  38. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/exceptions.py +0 -0
  39. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/integrations/README.md +0 -0
  40. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/integrations/__init__.py +0 -0
  41. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/integrations/agentr.py +0 -0
  42. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/integrations/integration.py +0 -0
  43. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/py.typed +0 -0
  44. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/servers/__init__.py +0 -0
  45. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/stores/__init__.py +0 -0
  46. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/stores/store.py +0 -0
  47. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/utils/bridge.py +0 -0
  48. {universal_mcp-0.1.0 → universal_mcp-0.1.1}/src/universal_mcp/utils/openapi.py +0 -0
--- universal_mcp-0.1.0/.gitignore
+++ universal_mcp-0.1.1/.gitignore
@@ -14,4 +14,12 @@ wheels/
 .env.local
 
 
-dump/
+dump/
+
+
+# SQLite database
+*.db
+*.sqlite
+*.sqlite3
+*.db-shm
+*.db-wal
--- /dev/null
+++ universal_mcp-0.1.1/CONTRIBUTING.md
@@ -0,0 +1,78 @@
+# Contributing to AgentR
+
+Thank you for your interest in contributing to AgentR! This document provides guidelines and instructions for contributing to the project.
+
+## 🚀 Getting Started
+
+1. Fork the repository at git@github.com:AgentrDev/universal-mcp.git
+2. Clone your fork
+3. Create a new branch for your changes:
+```bash
+git checkout -b feature/your-feature-name
+```
+
+4. Install development dependencies:
+```bash
+pip install -e ".[test]"
+```
+
+5. Make your changes and ensure tests pass:
+```bash
+pytest
+```
+
+6. Commit your changes following conventional commits:
+```bash
+git commit -m "feat: add new feature"
+```
+
+7. Push to your fork:
+```bash
+git push origin feature/your-feature-name
+```
+
+8. Open a Pull Request against the main repository
+
+## 📝 Guidelines
+
+### Code Style
+- Follow PEP 8 guidelines
+- Use type hints where possible
+- Include docstrings for functions and classes
+- Keep functions focused and single-purpose
+
+### Testing
+- Add tests for new features
+- Ensure all tests pass before submitting PR
+- Maintain or improve code coverage
+
+### Pull Requests
+- Keep PRs focused on a single change
+- Include a clear description of changes
+- Reference any related issues
+- Update documentation as needed
+
+### Commit Messages
+Follow conventional commits format:
+- feat: New feature
+- fix: Bug fix
+- docs: Documentation changes
+- style: Code style changes
+- refactor: Code refactoring
+- test: Test updates
+- chore: Maintenance tasks
+
+## 🐛 Reporting Issues
+
+- Search existing issues before creating new ones
+- Include clear steps to reproduce
+- Provide system information
+- Add relevant logs or screenshots
+
+## 📚 Documentation
+
+- Keep README.md updated
+- Document new features
+- Include docstrings
+- Update CHANGELOG.md for significant changes
+
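The `.[test]` extra installed in step 4 provides pytest>=8.3.5 and pytest-asyncio>=0.26.0 per the project metadata, so async tool calls can be tested directly. A minimal sketch of such a test (the file name and assertion here are hypothetical, not taken from the package):

```python
# Hypothetical test file, e.g. src/tests/test_example.py.
# Assumes development dependencies were installed with: pip install -e ".[test]"
import asyncio

import pytest


@pytest.mark.asyncio
async def test_async_roundtrip():
    # Stand-in for awaiting a real application tool call.
    result = await asyncio.sleep(0, result="ok")
    assert result == "ok"
```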
--- universal_mcp-0.1.0/PKG-INFO
+++ universal_mcp-0.1.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp
-Version: 0.1.0
+Version: 0.1.1
 Summary: Universal MCP acts as a middle ware for your API applications. It can store your credentials, authorize, enable disable apps on the fly and much more.
 Author-email: Manoj Bajaj <manojbajaj95@gmail.com>
 Requires-Python: >=3.11
@@ -10,6 +10,13 @@ Requires-Dist: pydantic-settings>=2.8.1
 Requires-Dist: pydantic>=2.11.1
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: typer>=0.15.2
+Provides-Extra: playground
+Requires-Dist: fastapi[standard]>=0.115.12; extra == 'playground'
+Requires-Dist: langchain-anthropic>=0.3.10; extra == 'playground'
+Requires-Dist: langchain-mcp-adapters>=0.0.3; extra == 'playground'
+Requires-Dist: langgraph-checkpoint-sqlite>=2.0.6; extra == 'playground'
+Requires-Dist: langgraph>=0.3.24; extra == 'playground'
+Requires-Dist: streamlit>=1.44.1; extra == 'playground'
 Provides-Extra: test
 Requires-Dist: pytest-asyncio>=0.26.0; extra == 'test'
 Requires-Dist: pytest>=8.3.5; extra == 'test'
@@ -87,6 +94,24 @@ if __name__ == "__main__":
     server.run()
 ```
 
+## Using Playground
+
+Start MCP Server
+```bash
+universal_mcp run -t sse
+```
+
+Start FastAPI app
+```bash
+fastapi run src/playground
+```
+
+Start Frontend
+```bash
+streamlit run src/playground/streamlit.py
+```
+
+
 ## 🧩 Available Applications
 AgentR comes with several pre-built applications:
 
--- universal_mcp-0.1.0/README.md
+++ universal_mcp-0.1.1/README.md
@@ -70,6 +70,24 @@ if __name__ == "__main__":
     server.run()
 ```
 
+## Using Playground
+
+Start MCP Server
+```bash
+universal_mcp run -t sse
+```
+
+Start FastAPI app
+```bash
+fastapi run src/playground
+```
+
+Start Frontend
+```bash
+streamlit run src/playground/streamlit.py
+```
+
+
 ## 🧩 Available Applications
 AgentR comes with several pre-built applications:
 
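Once all three processes are up, the FastAPI service exposes the endpoints that the new `playground/client.py` (shown further below) wraps. A minimal sketch of calling the service directly over HTTP, assuming the `fastapi run` default of port 8000 and the MCP server on port 8005 as configured in `react.py`:

```python
# Sketch: POST directly to the playground service's /invoke endpoint.
# The port and the shape of the reply (a ChatMessage payload) are assumptions
# based on the client code in this diff, not documented guarantees.
import httpx

response = httpx.post(
    "http://localhost:8000/invoke",
    json={"message": "Give me a zen quote."},
    timeout=60.0,
)
response.raise_for_status()
print(response.json())  # the final ChatMessage as JSON
```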
--- universal_mcp-0.1.0/pyproject.toml
+++ universal_mcp-0.1.1/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "universal-mcp"
-version = "0.1.0"
+version = "0.1.1"
 description = "Universal MCP acts as a middle ware for your API applications. It can store your credentials, authorize, enable disable apps on the fly and much more."
 readme = "README.md"
 authors = [
@@ -21,11 +21,17 @@ test = [
     "pytest>=8.3.5",
     "pytest-asyncio>=0.26.0",
 ]
-
+playground = [
+    "fastapi[standard]>=0.115.12",
+    "langchain-anthropic>=0.3.10",
+    "langchain-mcp-adapters>=0.0.3",
+    "langgraph>=0.3.24",
+    "langgraph-checkpoint-sqlite>=2.0.6",
+    "streamlit>=1.44.1",
+]
 [project.scripts]
 universal_mcp = "universal_mcp.cli:app"
 
 [build-system]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
-
--- /dev/null
+++ universal_mcp-0.1.1/src/playground/__init__.py
@@ -0,0 +1,3 @@
+from playground.service import app
+
+__all__ = ["app"]
--- /dev/null
+++ universal_mcp-0.1.1/src/playground/agents/react.py
@@ -0,0 +1,50 @@
+from contextlib import asynccontextmanager
+from langchain_anthropic import ChatAnthropic
+from langgraph.prebuilt import create_react_agent
+from langchain_core.messages import HumanMessage
+import asyncio
+from langchain_mcp_adapters.client import MultiServerMCPClient
+
+
+@asynccontextmanager
+async def load_tools():
+    async with MultiServerMCPClient(
+        {
+            "agentr": {
+                "url": "http://localhost:8005/sse",
+                "transport": "sse",
+            },
+        }
+    ) as client:
+        tools = client.get_tools()
+        yield tools
+
+
+@asynccontextmanager
+async def create_agent():
+    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
+    async with load_tools() as tools:
+        yield create_react_agent(
+            model=llm,
+            tools=tools,
+            debug=False,
+        )
+
+
+async def main():
+    async with create_agent() as agent:
+        print("Welcome to the agent!")
+        messages = []
+        while True:
+            human_input = input("Enter your message: ")
+            if human_input.lower() in ["exit", "quit", "q"]:
+                break
+            messages.append(HumanMessage(content=human_input))
+            results = await agent.ainvoke({"messages": messages})
+            ai_message = results["messages"][-1]
+            messages.append(ai_message)
+            print(ai_message.content)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
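Because `create_agent` is an async context manager, the same ReAct agent can also be driven non-interactively. A short sketch reusing the module above (assumes the MCP server from the README is running on localhost:8005 and `ANTHROPIC_API_KEY` is set; the question is illustrative):

```python
# One-shot use of the agent defined in playground/agents/react.py,
# without the interactive input() loop.
import asyncio

from langchain_core.messages import HumanMessage

from playground.agents.react import create_agent


async def ask(question: str) -> str:
    async with create_agent() as agent:
        result = await agent.ainvoke({"messages": [HumanMessage(content=question)]})
        return result["messages"][-1].content


if __name__ == "__main__":
    print(asyncio.run(ask("Give me a zen quote.")))
```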
--- /dev/null
+++ universal_mcp-0.1.1/src/playground/client.py
@@ -0,0 +1,288 @@
+import json
+import os
+from collections.abc import AsyncGenerator, Generator
+from typing import Any
+
+import httpx
+
+from playground.schema import (
+    ChatHistory,
+    ChatHistoryInput,
+    ChatMessage,
+    StreamInput,
+    UserInput,
+)
+
+
+class AgentClientError(Exception):
+    pass
+
+
+class AgentClient:
+    """Client for interacting with the agent service."""
+
+    def __init__(
+        self,
+        base_url: str = "http://0.0.0.0",
+        timeout: float | None = None,
+    ) -> None:
+        """
+        Initialize the client.
+
+        Args:
+            base_url (str): The base URL of the agent service.
+            timeout (float, optional): The timeout for requests.
+                Default: None (no timeout).
+
+        The AUTH_SECRET environment variable, when set, is sent as a bearer token.
+        """
+        self.base_url = base_url
+        self.auth_secret = os.getenv("AUTH_SECRET")
+        self.timeout = timeout
+
+    @property
+    def _headers(self) -> dict[str, str]:
+        headers = {}
+        if self.auth_secret:
+            headers["Authorization"] = f"Bearer {self.auth_secret}"
+        return headers
+
+    def retrieve_info(self) -> None:
+        try:
+            response = httpx.get(
+                f"{self.base_url}/info",
+                headers=self._headers,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+        except httpx.HTTPError as e:
+            raise AgentClientError(f"Error getting service info: {e}")
+
+    async def ainvoke(
+        self,
+        message: str,
+        model: str | None = None,
+        thread_id: str | None = None,
+        agent_config: dict[str, Any] | None = None,
+    ) -> ChatMessage:
+        """
+        Invoke the agent asynchronously. Only the final message is returned.
+
+        Args:
+            message (str): The message to send to the agent
+            model (str, optional): LLM model to use for the agent
+            thread_id (str, optional): Thread ID for continuing a conversation
+            agent_config (dict[str, Any], optional): Additional configuration to pass through to the agent
+
+        Returns:
+            ChatMessage: The response from the agent
+        """
+        request = UserInput(message=message)
+        if thread_id:
+            request.thread_id = thread_id
+        if agent_config:
+            request.agent_config = agent_config
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.post(
+                    f"{self.base_url}/invoke",
+                    json=request.model_dump(),
+                    headers=self._headers,
+                    timeout=self.timeout,
+                )
+                response.raise_for_status()
+            except httpx.HTTPError as e:
+                raise AgentClientError(f"Error: {e}")
+
+        return ChatMessage.model_validate(response.json())
+
+    def invoke(
+        self,
+        message: str,
+        model: str | None = None,
+        thread_id: str | None = None,
+        agent_config: dict[str, Any] | None = None,
+    ) -> ChatMessage:
+        """
+        Invoke the agent synchronously. Only the final message is returned.
+
+        Args:
+            message (str): The message to send to the agent
+            model (str, optional): LLM model to use for the agent
+            thread_id (str, optional): Thread ID for continuing a conversation
+            agent_config (dict[str, Any], optional): Additional configuration to pass through to the agent
+
+        Returns:
+            ChatMessage: The response from the agent
+        """
+        request = UserInput(message=message)
+        if thread_id:
+            request.thread_id = thread_id
+        if model:
+            request.model = model
+        if agent_config:
+            request.agent_config = agent_config
+        try:
+            response = httpx.post(
+                f"{self.base_url}/invoke",
+                json=request.model_dump(),
+                headers=self._headers,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+        except httpx.HTTPError as e:
+            raise AgentClientError(f"Error: {e}")
+
+        return ChatMessage.model_validate(response.json())
+
+    def _parse_stream_line(self, line: str) -> ChatMessage | str | None:
+        line = line.strip()
+        if line.startswith("data: "):
+            data = line[6:]
+            if data == "[DONE]":
+                return None
+            try:
+                parsed = json.loads(data)
+            except Exception as e:
+                raise Exception(f"Error JSON parsing message from server: {e}")
+            match parsed["type"]:
+                case "message":
+                    # Convert the JSON formatted message to an AnyMessage
+                    try:
+                        return ChatMessage.model_validate(parsed["content"])
+                    except Exception as e:
+                        raise Exception(f"Server returned invalid message: {e}")
+                case "token":
+                    # Yield the str token directly
+                    return parsed["content"]
+                case "error":
+                    raise Exception(parsed["content"])
+        return None
+
+    def stream(
+        self,
+        message: str,
+        model: str | None = None,
+        thread_id: str | None = None,
+        agent_config: dict[str, Any] | None = None,
+        stream_tokens: bool = True,
+    ) -> Generator[ChatMessage | str, None, None]:
+        """
+        Stream the agent's response synchronously.
+
+        Each intermediate message of the agent process is yielded as a ChatMessage.
+        If stream_tokens is True (the default value), the response will also yield
+        content tokens from streaming models as they are generated.
+
+        Args:
+            message (str): The message to send to the agent
+            model (str, optional): LLM model to use for the agent
+            thread_id (str, optional): Thread ID for continuing a conversation
+            agent_config (dict[str, Any], optional): Additional configuration to pass through to the agent
+            stream_tokens (bool, optional): Stream tokens as they are generated
+                Default: True
+
+        Returns:
+            Generator[ChatMessage | str, None, None]: The response from the agent
+        """
+        request = StreamInput(message=message, stream_tokens=stream_tokens)
+        if thread_id:
+            request.thread_id = thread_id
+        if model:
+            request.model = model
+        if agent_config:
+            request.agent_config = agent_config
+        try:
+            with httpx.stream(
+                "POST",
+                f"{self.base_url}/stream",
+                json=request.model_dump(),
+                headers=self._headers,
+                timeout=self.timeout,
+            ) as response:
+                response.raise_for_status()
+                for line in response.iter_lines():
+                    if line.strip():
+                        parsed = self._parse_stream_line(line)
+                        if parsed is None:
+                            break
+                        yield parsed
+        except httpx.HTTPError as e:
+            raise AgentClientError(f"Error: {e}")
+
+    async def astream(
+        self,
+        message: str,
+        model: str | None = None,
+        thread_id: str | None = None,
+        agent_config: dict[str, Any] | None = None,
+        stream_tokens: bool = True,
+    ) -> AsyncGenerator[ChatMessage | str, None]:
+        """
+        Stream the agent's response asynchronously.
+
+        Each intermediate message of the agent process is yielded as a ChatMessage.
+        If stream_tokens is True (the default value), the response will also yield
+        content tokens from streaming models as they are generated.
+
+        Args:
+            message (str): The message to send to the agent
+            model (str, optional): LLM model to use for the agent
+            thread_id (str, optional): Thread ID for continuing a conversation
+            agent_config (dict[str, Any], optional): Additional configuration to pass through to the agent
+            stream_tokens (bool, optional): Stream tokens as they are generated
+                Default: True
+
+        Returns:
+            AsyncGenerator[ChatMessage | str, None]: The response from the agent
+        """
+
+        request = StreamInput(message=message, stream_tokens=stream_tokens)
+        if thread_id:
+            request.thread_id = thread_id
+        if model:
+            request.model = model
+        if agent_config:
+            request.agent_config = agent_config
+        async with httpx.AsyncClient() as client:
+            try:
+                async with client.stream(
+                    "POST",
+                    f"{self.base_url}/stream",
+                    json=request.model_dump(),
+                    headers=self._headers,
+                    timeout=self.timeout,
+                ) as response:
+                    response.raise_for_status()
+                    async for line in response.aiter_lines():
+                        if line.strip():
+                            parsed = self._parse_stream_line(line)
+                            if parsed is None:
+                                break
+                            yield parsed
+            except httpx.HTTPError as e:
+                raise AgentClientError(f"Error: {e}")
+
+    def get_history(
+        self,
+        thread_id: str,
+    ) -> ChatHistory:
+        """
+        Get chat history.
+
+        Args:
+            thread_id (str): Thread ID identifying the conversation
+        """
+        request = ChatHistoryInput(thread_id=thread_id)
+        try:
+            response = httpx.post(
+                f"{self.base_url}/history",
+                json=request.model_dump(),
+                headers=self._headers,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+        except httpx.HTTPError as e:
+            raise AgentClientError(f"Error: {e}")
+
+        return ChatHistory.model_validate(response.json())
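A sketch of driving the service through this client rather than raw HTTP, based only on the methods shown above (the port and thread ID are illustrative assumptions):

```python
from playground.client import AgentClient, AgentClientError

client = AgentClient(base_url="http://localhost:8000", timeout=30.0)
try:
    # Single-shot request; only the final message comes back.
    reply = client.invoke("What tools do you have?", thread_id="demo-thread")
    print(reply)

    # Streaming: str items are tokens, everything else is a full ChatMessage.
    for item in client.stream("Summarize that in one line.", thread_id="demo-thread"):
        if isinstance(item, str):
            print(item, end="", flush=True)
        else:
            print(f"\n[{type(item).__name__}]")
except AgentClientError as exc:
    print(f"Agent service error: {exc}")
```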
--- /dev/null
+++ universal_mcp-0.1.1/src/playground/memory/__init__.py
@@ -0,0 +1,14 @@
+from langgraph.checkpoint.base import BaseCheckpointSaver
+
+from playground.memory.sqlite import get_sqlite_saver
+
+
+def initialize_database() -> BaseCheckpointSaver:
+    """
+    Initialize the appropriate database checkpointer based on configuration.
+    Returns an initialized AsyncCheckpointer instance.
+    """
+    return get_sqlite_saver()
+
+
+__all__ = ["initialize_database"]
--- /dev/null
+++ universal_mcp-0.1.1/src/playground/memory/sqlite.py
@@ -0,0 +1,9 @@
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
+
+from playground.settings import settings
+
+
+def get_sqlite_saver() -> BaseCheckpointSaver:
+    """Initialize and return a SQLite saver instance."""
+    return AsyncSqliteSaver.from_conn_string(settings.SQLITE_DB_PATH)
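One caveat worth noting: in langgraph-checkpoint-sqlite 2.x (the project pins `>=2.0.6`), `AsyncSqliteSaver.from_conn_string` is, to the best of my knowledge, an async context manager rather than a plain saver, so callers typically enter it before handing the saver to a graph. A hedged sketch of that usage pattern, with an illustrative database path:

```python
# Sketch of consuming the saver under langgraph-checkpoint-sqlite >= 2.0,
# assuming from_conn_string is an async context manager in that series.
# "checkpoints.db" is illustrative; the package reads the real path from
# playground.settings.SQLITE_DB_PATH.
import asyncio

from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver


async def main() -> None:
    async with AsyncSqliteSaver.from_conn_string("checkpoints.db") as saver:
        # `saver` is the BaseCheckpointSaver to pass as `checkpointer=`
        # when compiling a langgraph graph or creating an agent.
        print(type(saver).__name__)


asyncio.run(main())
```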