unimcp 0.1.0__tar.gz
- unimcp-0.1.0/.gitignore +41 -0
- unimcp-0.1.0/PKG-INFO +87 -0
- unimcp-0.1.0/README.md +70 -0
- unimcp-0.1.0/error.log +32 -0
- unimcp-0.1.0/pyproject.toml +28 -0
- unimcp-0.1.0/reference/client.py +138 -0
- unimcp-0.1.0/reference/client_architecture.md +90 -0
- unimcp-0.1.0/reference/server.py +78 -0
- unimcp-0.1.0/sc.py +19 -0
- unimcp-0.1.0/src/unimcp/__init__.py +9 -0
- unimcp-0.1.0/src/unimcp/client.py +57 -0
- unimcp-0.1.0/src/unimcp/llm.py +97 -0
- unimcp-0.1.0/tests/__init__.py +1 -0
- unimcp-0.1.0/user_data.json +8 -0
unimcp-0.1.0/.gitignore
ADDED
@@ -0,0 +1,41 @@
```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Distribution / packaging
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# IDEs
.vscode/
.idea/

# Testing
.pytest_cache/
htmlcov/
.coverage
```
unimcp-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,87 @@
````
Metadata-Version: 2.4
Name: unimcp
Version: 0.1.0
Summary: A simple client library to connect to any MCP server and interact with LLMs.
Project-URL: Homepage, https://github.com/barada02/UniMCP
Project-URL: Bug Tracker, https://github.com/barada02/UniMCP/issues
Author-email: Chandan Kumar Barad <chandanbarada2@gmail.com>
License: MIT
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Requires-Python: >=3.8
Requires-Dist: mcp==1.27.0
Requires-Dist: openai==2.33.0
Requires-Dist: python-dotenv
Description-Content-Type: text/markdown

# UniMCP

A simple client library to connect to any MCP server and interact with tools seamlessly via code or through an LLM.

## Installation

```bash
pip install unimcp
```

*(Note: While in development, you can install it locally using `pip install -e .`)*

## Features
- **Simple MCP Client**: Connect to any MCP server, list available tools, and call them directly with just a few lines of Python.
- **LLM Integration**: Built-in wrapper to hook your MCP server tools directly into OpenAI (or any OpenAI-compatible API), enabling an LLM agent out-of-the-box.

## Usage

### 1. Using only the MCP Client (No LLM)

```python
import asyncio
from unimcp import UniClient

async def main():
    # Connect to your MCP server
    async with UniClient("http://localhost:8000/sse") as client:
        # 1. Get available tools
        tools = await client.get_tools()
        print("Available tools:", [t.name for t in tools])

        # 2. Call a specific tool manually
        result = await client.call_tool("my_tool_name", {"arg1": "value"})
        print("Tool Result:", result)

if __name__ == "__main__":
    asyncio.run(main())
```

### 2. Using the LLM Client

This uses OpenAI's Python library under the hood. You can configure it using environment variables (`OPENAI_API_KEY`, `OPENAI_BASE_URL`, `OPENAI_MODEL`) or pass them directly.

```python
import asyncio
from unimcp import UniClient, UniLLM

async def main():
    async with UniClient("http://localhost:8000/sse") as client:

        # Initialize the LLM with the MCP client
        # It automatically detects OPENAI_API_KEY from environment variables
        llm = UniLLM(client, model_name="gpt-4o")

        # Optionally set a system prompt
        llm.set_system_prompt("You are a helpful assistant with access to MCP tools.")

        # Chat with the LLM (it will automatically use the tools if needed)
        response = await llm.chat("Can you perform an action using your tools?")
        print("AI:", response)

if __name__ == "__main__":
    asyncio.run(main())
```

## Environment Variables
The `UniLLM` automatically respects the following standard environment variables:
- `OPENAI_API_KEY`: Your API key.
- `OPENAI_BASE_URL`: For connecting to local LLMs (like LM Studio, Ollama) or other providers.
- `OPENAI_MODEL`: Default model to use (fallback is `gpt-4o`).
````
unimcp-0.1.0/README.md
ADDED
@@ -0,0 +1,70 @@
````markdown
# UniMCP

A simple client library to connect to any MCP server and interact with tools seamlessly via code or through an LLM.

## Installation

```bash
pip install unimcp
```

*(Note: While in development, you can install it locally using `pip install -e .`)*

## Features
- **Simple MCP Client**: Connect to any MCP server, list available tools, and call them directly with just a few lines of Python.
- **LLM Integration**: Built-in wrapper to hook your MCP server tools directly into OpenAI (or any OpenAI-compatible API), enabling an LLM agent out-of-the-box.

## Usage

### 1. Using only the MCP Client (No LLM)

```python
import asyncio
from unimcp import UniClient

async def main():
    # Connect to your MCP server
    async with UniClient("http://localhost:8000/sse") as client:
        # 1. Get available tools
        tools = await client.get_tools()
        print("Available tools:", [t.name for t in tools])

        # 2. Call a specific tool manually
        result = await client.call_tool("my_tool_name", {"arg1": "value"})
        print("Tool Result:", result)

if __name__ == "__main__":
    asyncio.run(main())
```

### 2. Using the LLM Client

This uses OpenAI's Python library under the hood. You can configure it using environment variables (`OPENAI_API_KEY`, `OPENAI_BASE_URL`, `OPENAI_MODEL`) or pass them directly.

```python
import asyncio
from unimcp import UniClient, UniLLM

async def main():
    async with UniClient("http://localhost:8000/sse") as client:

        # Initialize the LLM with the MCP client
        # It automatically detects OPENAI_API_KEY from environment variables
        llm = UniLLM(client, model_name="gpt-4o")

        # Optionally set a system prompt
        llm.set_system_prompt("You are a helpful assistant with access to MCP tools.")

        # Chat with the LLM (it will automatically use the tools if needed)
        response = await llm.chat("Can you perform an action using your tools?")
        print("AI:", response)

if __name__ == "__main__":
    asyncio.run(main())
```

## Environment Variables
The `UniLLM` automatically respects the following standard environment variables:
- `OPENAI_API_KEY`: Your API key.
- `OPENAI_BASE_URL`: For connecting to local LLMs (like LM Studio, Ollama) or other providers.
- `OPENAI_MODEL`: Default model to use (fallback is `gpt-4o`).
````
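A minimal `.env` covering these three variables might look like the sketch below; all values are placeholders. `sc.py` loads the file via `python-dotenv`, after which `UniLLM` reads the values with `os.getenv`.

```ini
# .env -- placeholder values; adjust for your provider
OPENAI_API_KEY=sk-your-key-here
OPENAI_BASE_URL=http://localhost:1234/v1   # e.g. an LM Studio or other OpenAI-compatible endpoint
OPENAI_MODEL=gpt-4o
```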
unimcp-0.1.0/error.log
ADDED
@@ -0,0 +1,32 @@
```
(venv) (base) PS C:\Users\barad\Desktop\projects\UniMCP> python sc.py
Traceback (most recent call last):
  File "C:\Users\barad\Desktop\projects\UniMCP\sc.py", line 19, in <module>
    asyncio.run(main())
    ~~~~~~~~~~~^^^^^^^^
  File "C:\Users\barad\anaconda3\Lib\asyncio\runners.py", line 195, in run
    return runner.run(main)
           ~~~~~~~~~~^^^^^^
  File "C:\Users\barad\anaconda3\Lib\asyncio\runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
  File "C:\Users\barad\anaconda3\Lib\asyncio\base_events.py", line 725, in run_until_complete
    return future.result()
           ~~~~~~~~~~~~~^^
  File "C:\Users\barad\Desktop\projects\UniMCP\sc.py", line 16, in main
    response = await llm.chat("hi can you save my friends name dinesh and his phone number is 9009876543 and his email id is dinesh@gmail.com ")
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\barad\Desktop\projects\UniMCP\src\unimcp\llm.py", line 73, in chat
    response = await self.client.chat.completions.create(**chat_kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\barad\Desktop\projects\UniMCP\venv\Lib\site-packages\openai\resources\chat\completions\completions.py", line 2714, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
    ...<49 lines>...
    )
    ^
  File "C:\Users\barad\Desktop\projects\UniMCP\venv\Lib\site-packages\openai\_base_client.py", line 1913, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\barad\Desktop\projects\UniMCP\venv\Lib\site-packages\openai\_base_client.py", line 1698, in request
    raise self._make_status_error_from_response(err.response) from None
openai.NotFoundError: Error code: 404 - [{'error': {'code': 404, 'message': 'models/gpt-4o is not found for API version v1main, or is not supported for generateContent. Call ListModels to see the list of available models and their supported methods.', 'status': 'NOT_FOUND'}}]
```
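The 404 above is a model-name mismatch: `OPENAI_BASE_URL` points at an OpenAI-compatible endpoint (the `generateContent` wording suggests a Gemini-backed bridge) that serves no model called `gpt-4o`, which is the fallback `UniLLM` uses when `OPENAI_MODEL` is unset. A quick way to discover valid names, assuming the endpoint implements the standard model-listing route, is a sketch like:

```python
# sketch: list the models the configured endpoint actually serves,
# then set OPENAI_MODEL in .env to one of the printed ids
import asyncio
from dotenv import load_dotenv
from openai import AsyncOpenAI

load_dotenv()  # picks up OPENAI_API_KEY / OPENAI_BASE_URL

async def main():
    client = AsyncOpenAI()
    models = await client.models.list()
    print([m.id for m in models.data])

asyncio.run(main())
```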
unimcp-0.1.0/pyproject.toml
ADDED
@@ -0,0 +1,28 @@
```toml
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "unimcp"
version = "0.1.0"
description = "A simple client library to connect to any MCP server and interact with LLMs."
readme = "README.md"
requires-python = ">=3.8"
license = { text = "MIT" }
authors = [
    { name = "Chandan Kumar Barad", email = "chandanbarada2@gmail.com" },
]
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
dependencies = [
    "mcp==1.27.0",
    "openai==2.33.0",
    "python-dotenv"
]

[project.urls]
"Homepage" = "https://github.com/barada02/UniMCP"
"Bug Tracker" = "https://github.com/barada02/UniMCP/issues"
```
unimcp-0.1.0/reference/client.py
ADDED
@@ -0,0 +1,138 @@
```python
import asyncio
import os
import json
from dotenv import load_dotenv
from openai import AsyncOpenAI
from mcp import ClientSession
from mcp.client.sse import sse_client

# Load environment variables
load_dotenv()

# Initialize OpenAI Client
# Base URL and API Key are automatically pulled from OPENAI_BASE_URL and OPENAI_API_KEY env variables
openai_client = AsyncOpenAI()
MODEL_NAME = os.getenv("OPENAI_MODEL", "gpt-4o")

def convert_mcp_to_openai_tools(mcp_tools) -> list:
    """
    Helper function to convert MCP tool definitions into the format
    that the OpenAI API expects.
    """
    openai_tools = []
    for tool in mcp_tools:
        openai_tools.append({
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                # MCP uses JSON Schema for inputs, which matches OpenAI closely
                "parameters": tool.inputSchema
            }
        })
    return openai_tools

async def chat_interaction_loop(session: ClientSession):
    """
    Continuous chat loop that keeps conversation history.
    """
    print("\n[Client] Initializing session history...")

    # Ask the MCP server what tools it has available
    tools_response = await session.list_tools()
    available_openai_tools = convert_mcp_to_openai_tools(tools_response.tools)
    print(f"[Client] Server provides {len(available_openai_tools)} tools.")
    print(f"[Client] Tools: {[tool['function']['name'] for tool in available_openai_tools]}")
    # print(f"[Client] tools: {json.dumps(available_openai_tools, indent=2)}")

    # System prompt gives the AI a personality and rules
    system_prompt = (
        "You are a helpful assistant hooked up to an external database. "
        "You MUST use your provided tools to save user preferences, personal details, and contacts. "
        f"You have [{len(available_openai_tools)}] tools available to interact with the file system. "
        "Whenever the user mentions a fact about themselves or a friend, immediately call the appropriate tool."
    )

    messages = [
        {
            "role": "system",
            "content": system_prompt
        }
    ]

    print("\n--- Chat Started! Type 'exit' or 'quit' to stop. ---")

    while True:
        # Prompt user for input
        user_prompt = input("\nYou: ")
        if user_prompt.lower() in ("exit", "quit"):
            print("Goodbye!")
            break

        messages.append({"role": "user", "content": user_prompt})

        # We use a loop here because sometimes the AI calls a tool, gets the result,
        # but decides it needs to call ANOTHER tool before answering the user.
        while True:
            try:
                response = await openai_client.chat.completions.create(
                    model=MODEL_NAME,
                    messages=messages,
                    tools=available_openai_tools,
                    tool_choice="auto"  # some models need this explicitly set
                )
            except Exception as e:
                print(f"\n[Error] The LLM Provider returned an error: {e}")
                print("[Hint] If you hit a rate limit, try waiting a minute, or change the model in your .env file.")
                break

            response_message = response.choices[0].message
            # We MUST append the assistant's message to the conversation history,
            # either as a text response or a tool call response
            messages.append(response_message)

            # If the LLM decides to call a tool (or multiple tools)
            if response_message.tool_calls:
                for tool_call in response_message.tool_calls:
                    tool_name = tool_call.function.name
                    tool_args = json.loads(tool_call.function.arguments)

                    print(f"\n[Processing] Calling tool '{tool_name}' with args: {tool_args}")

                    # Call the tool on the MCP server
                    result = await session.call_tool(tool_name, arguments=tool_args)

                    # Read the response text from the server
                    tool_result_str = ""
                    for content in result.content:
                        if content.type == "text":
                            tool_result_str += content.text

                    print(f"[Processing] Server replied: {tool_result_str.strip()}")

                    # Feed the result back to the LLM
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": tool_name,
                        "content": tool_result_str
                    })
                # After feeding all tool results, let the `while True` loop run again
                # so the LLM can generate its final response (or call another tool!)
            else:
                # No more tools requested, the LLM has given us a text response
                print(f"AI: {response_message.content}")
                break  # Break inner loop, go back to waiting for user input

async def main():
    print("Connecting to remote MCP server via SSE...")
    async with sse_client(url="http://localhost:8000/sse") as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            print("Connected and initialized successfully!")

            # Start interactive chat
            await chat_interaction_loop(session)

if __name__ == "__main__":
    asyncio.run(main())
```
unimcp-0.1.0/reference/client_architecture.md
ADDED
@@ -0,0 +1,90 @@
````markdown
# MCP Client Architecture & Dataflow

This document explains exactly how our custom MCP Client (`client.py`) operates.

A common misconception is that the "AI executes tools." **This is false.** An LLM is entirely isolated; it is merely a text-prediction engine. It cannot access your file system or the internet on its own.

The **Client** is the true powerhouse. It acts as the orchestrator that sits in the middle of three entities:
1. **The User** (providing prompts)
2. **The LLM** (making decisions and formatting text)
3. **The MCP Server** (performing actual programmatic actions)

---

## 1. High-Level Dataflow

```mermaid
graph TD
    User((User)) <-->|Text Prompts / Text Answers| Client[MCP Client orchestrator]
    Client <-->|SSE / JSON-RPC\nExecutes Tools| Server[(MCP Server)]
    Client <-->|REST API\nSends Prompts & Gets JSON| LLM{LLM API}
    Server <-->|File I/O| FS[(Local File System / OS)]
```

---

## 2. Step-by-Step Breakdown of the Workflow

### Phase 1: Initialization & Tool Discovery
Before the user even types a message, the Client needs to figure out what the MCP Server can do.

1. **Connect:** The Client establishes an SSE (Server-Sent Events) connection to `http://localhost:8000/sse`.
2. **List Tools:** The Client asks the MCP Server, *"What tools do you have?"*
3. **Schema Conversion:** The MCP SDK returns tool definitions in standard JSON Schema format. The Client converts these into the strict `{"type": "function", ...}` schema required by the OpenAI/LLM API.

### Phase 2: The Chat & Execution Loop
This is the core of the client design. It uses a continuous **loop-within-a-loop** architecture.

```mermaid
sequenceDiagram
    autonumber
    participant U as User
    participant C as Client (Python)
    participant L as LLM (OpenAI API)
    participant S as MCP Server (FastMCP)

    U->>C: "Save my name as John"

    rect rgba(128, 128, 128, 0.2)
    Note over C, L: The Execution Loop
    C->>L: Send [History] + [Available Tools]
    L-->>C: Response: Tool Call Requested! <br/> {"name": "save_user_detail", "args": {"key": "name", "value": "John"}}

    Note over C, S: The Client physically executes the tool
    C->>S: JSON-RPC: Call Tool `save_user_detail`
    S-->>C: Result: "Successfully saved"

    Note over C, L: Client feeds result back to LLM context
    C->>L: Send [History] + [Tool Result: "Successfully saved"]
    L-->>C: Response: Pure Text <br/> "I have saved your name as John!"
    end

    C->>U: "I have saved your name as John!"
```

---

## 3. Why the Infinite Inner Loop? (`while True:`)

In `client.py`, you will notice an inner `while True:` loop wrapped around the LLM API call. Why do we need this?

**Because an LLM might need to chain multiple tools together to fulfill a single prompt.**

Imagine the user asks: *"What is my name, and what is my system OS?"*

1. **Iteration 1:** The LLM receives the prompt. It outputs a JSON payload requesting a call to `get_all_data`.
2. The Client parses this, calls the server, and gets the JSON string back. The Client appends this to the message history.
3. **Iteration 2 (inside the loop):** The Client immediately sends the updated history back to the LLM. The LLM reads the history, sees it now has the user's name, but realizes it *still* needs the OS. It outputs another payload requesting a call to `get_system_status`.
4. The Client parses this, calls the server, gets the OS, and appends it to the history.
5. **Iteration 3 (inside the loop):** The Client sends the history to the LLM *again*. This time, the LLM has all the pieces. It generates the final conversational text.
6. The Client detects this is text (not a tool call payload), **breaks the inner loop**, and prints the final answer to the user.

## Summary

The LLM is just a brain that looks at the available functions and outputs JSON payloads saying *"I think you should run this function with these arguments"*.

**The MCP Client:**
1. Parses those JSON requests.
2. Actually makes the network/local calls to execute the functions on the MCP Server.
3. Captures the physical output of the system.
4. Feeds that output back to the LLM so it knows what happened.
````
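The loop this document describes is what `reference/client.py` above implements in full; condensed into a self-contained function, the control flow looks roughly like the following sketch (the `run_turn` name and signature are illustrative, not shipped code):

```python
# condensed sketch of the section-3 execution loop
import json
from openai import AsyncOpenAI
from mcp import ClientSession

async def run_turn(openai_client: AsyncOpenAI, session: ClientSession,
                   model: str, messages: list, tools: list) -> str:
    """Drive the LLM until it stops requesting tools, then return its text."""
    while True:
        response = await openai_client.chat.completions.create(
            model=model, messages=messages, tools=tools, tool_choice="auto")
        msg = response.choices[0].message
        messages.append(msg)                 # history must stay complete
        if not msg.tool_calls:               # plain text -> final answer
            return msg.content
        for call in msg.tool_calls:          # otherwise run every requested tool
            result = await session.call_tool(
                call.function.name,
                arguments=json.loads(call.function.arguments))
            text = "".join(c.text for c in result.content if c.type == "text")
            messages.append({"role": "tool", "tool_call_id": call.id,
                             "name": call.function.name, "content": text})
```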
unimcp-0.1.0/reference/server.py
ADDED
@@ -0,0 +1,78 @@
```python
from mcp.server.fastmcp import FastMCP
import platform
import datetime
import json
import os

# Create server with a specific name
mcp = FastMCP("remote-system-notes")

DB_FILE = "user_data.json"

def _load_data() -> dict:
    if not os.path.exists(DB_FILE):
        return {}
    with open(DB_FILE, "r") as f:
        return json.load(f)

def _save_data(data: dict):
    with open(DB_FILE, "w") as f:
        json.dump(data, f, indent=4)

@mcp.tool()
def get_system_status() -> str:
    """
    Get the current system status, including OS details and current time.
    """
    os_info = platform.system() + " " + platform.release()
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return f"System: {os_info}\nCurrent Time: {current_time}\nStatus: All systems operational."

@mcp.tool()
def get_all_data() -> str:
    """Read and retrieve all saved user data and contacts from the database."""
    data = _load_data()
    if not data:
        return "The database is currently empty."
    return json.dumps(data, indent=2)

@mcp.tool()
def save_user_detail(key: str, value: str) -> str:
    """
    Save or update a piece of personal information about the user (e.g., 'name', 'age', 'email').
    """
    data = _load_data()
    if "user" not in data:
        data["user"] = {}
    data["user"][key] = value
    _save_data(data)
    return f"Successfully saved user detail: {key} = {value}"

@mcp.tool()
def save_contact(name: str, phone: str = "", email: str = "") -> str:
    """
    Save or update a contact/friend's details.
    Must provide the name, and optionally a phone number or email string.
    """
    data = _load_data()
    if "contacts" not in data:
        data["contacts"] = {}
    data["contacts"][name] = {"phone": phone, "email": email}
    _save_data(data)
    return f"Contact '{name}' saved successfully."

if __name__ == "__main__":
    # FastMCP supports 'sse' (Server-Sent Events) for acting as a remote HTTP server
    # Note: running this requires 'uvicorn' and 'starlette' (pip install uvicorn starlette)
    import sys
    print("Starting Remote MCP Server on http://localhost:8000/sse")
    print("Press CTRL+C to close.")

    try:
        mcp.run(transport="sse")
    except KeyboardInterrupt:
        print("\nShutting down server gracefully...")
        sys.exit(0)
    except Exception as e:
        print(f"\nServer stopped unexpectedly: {e}")
        sys.exit(1)
```
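The `user_data.json` file listed in the summary (+8 lines) is not shown in this diff, but given `save_contact` and `_save_data` (which writes with `indent=4`) above, a successful save of the contact requested in `sc.py` below would produce something like this (illustrative; the exact key casing depends on the LLM's arguments):

```json
{
    "contacts": {
        "dinesh": {
            "phone": "9009876543",
            "email": "dinesh@gmail.com"
        }
    }
}
```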
unimcp-0.1.0/sc.py
ADDED
@@ -0,0 +1,19 @@
```python
import asyncio
from unimcp import UniClient, UniLLM
import os
import dotenv
dotenv.load_dotenv()

url = os.getenv("OPENAI_BASE_URL")
token = os.getenv("OPENAI_API_KEY")
model_name = os.getenv("OPENAI_MODEL")

async def main():
    async with UniClient("http://localhost:8000/sse") as client:
        # Pass in base_url, api_key, model_name OR let it read from .env!
        llm = UniLLM(client, model_name=model_name, api_key=token, base_url=url)

        response = await llm.chat("hi can you save my friends name dinesh and his phone number is 9009876543 and his email id is dinesh@gmail.com ")
        print("AI:", response)

asyncio.run(main())
```
unimcp-0.1.0/src/unimcp/client.py
ADDED
@@ -0,0 +1,57 @@
```python
import asyncio
from typing import Optional, List, Dict, Any
from mcp import ClientSession
from mcp.client.sse import sse_client
from contextlib import AsyncExitStack

class UniClient:
    """
    A unified MCP Client that connects to an MCP server (SSE by default)
    and allows execution of tools.
    """
    def __init__(self, url: str):
        self.url = url
        self.session: Optional[ClientSession] = None
        self._exit_stack: Optional[AsyncExitStack] = None

    async def connect(self):
        """Connects to the MCP server."""
        self._exit_stack = AsyncExitStack()
        read_stream, write_stream = await self._exit_stack.enter_async_context(sse_client(url=self.url))
        self.session = await self._exit_stack.enter_async_context(ClientSession(read_stream, write_stream))
        await self.session.initialize()

    async def disconnect(self):
        """Disconnects from the MCP server."""
        if self._exit_stack:
            await self._exit_stack.aclose()
            self._exit_stack = None
            self.session = None

    async def __aenter__(self):
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.disconnect()

    async def get_tools(self) -> List[Any]:
        """Lists available tools from the MCP server."""
        if not self.session:
            raise RuntimeError("Client not connected. Call connect() first.")
        response = await self.session.list_tools()
        return response.tools

    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> str:
        """Executes a tool on the MCP server and returns the text result."""
        if not self.session:
            raise RuntimeError("Client not connected. Call connect() first.")

        result = await self.session.call_tool(tool_name, arguments=arguments)

        # Read the response text from the server
        tool_result_str = ""
        for content in result.content:
            if content.type == "text":
                tool_result_str += content.text
        return tool_result_str
```
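Beyond the `async with` form shown in the README, `UniClient` also exposes `connect()`/`disconnect()` directly, which is useful when the client's lifetime doesn't fit a single block. A brief sketch using only the methods defined above:

```python
import asyncio
from unimcp import UniClient

async def main():
    client = UniClient("http://localhost:8000/sse")
    await client.connect()          # required before get_tools()/call_tool()
    try:
        tools = await client.get_tools()
        print([t.name for t in tools])
    finally:
        await client.disconnect()   # closes the SSE streams via the exit stack

asyncio.run(main())
```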
unimcp-0.1.0/src/unimcp/llm.py
ADDED
@@ -0,0 +1,97 @@
```python
from typing import Optional, List, Dict, Any
from openai import AsyncOpenAI
import json
import os

class UniLLM:
    """
    A unified LLM wrapper that connects to OpenAI (or any OpenAI-compatible API)
    and automatically executes tools provided by a UniClient.
    """
    def __init__(
        self,
        mcp_client,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        model_name: Optional[str] = None
    ):
        self.mcp_client = mcp_client
        self.messages: List[Dict[str, Any]] = []

        kwargs = {}
        # Priority: arguments > environment variables
        final_api_key = api_key or os.getenv("OPENAI_API_KEY")
        if final_api_key:
            kwargs["api_key"] = final_api_key

        final_base_url = base_url or os.getenv("OPENAI_BASE_URL")
        if final_base_url:
            kwargs["base_url"] = final_base_url

        self.model_name = model_name or os.getenv("OPENAI_MODEL", "gpt-4o")
        self.client = AsyncOpenAI(**kwargs)

    def _convert_mcp_to_openai_tools(self, mcp_tools) -> list:
        openai_tools = []
        for tool in mcp_tools:
            openai_tools.append({
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema
                }
            })
        return openai_tools

    def set_system_prompt(self, prompt: str):
        """Sets the system prompt for the LLM."""
        if self.messages and self.messages[0]["role"] == "system":
            self.messages[0]["content"] = prompt
        else:
            self.messages.insert(0, {"role": "system", "content": prompt})

    async def chat(self, user_input: str) -> str:
        """
        Sends a user message to the LLM and processes any required tool calls
        from the MCP server before returning the final text response.
        """
        self.messages.append({"role": "user", "content": user_input})

        mcp_tools = await self.mcp_client.get_tools()
        available_openai_tools = self._convert_mcp_to_openai_tools(mcp_tools)

        while True:
            chat_kwargs = {
                "model": self.model_name,
                "messages": self.messages,
            }
            if available_openai_tools:
                chat_kwargs["tools"] = available_openai_tools
                chat_kwargs["tool_choice"] = "auto"

            response = await self.client.chat.completions.create(**chat_kwargs)
            response_message = response.choices[0].message

            # Append the assistant's message to the history
            self.messages.append(response_message)

            # If the LLM decides to call a tool (or multiple tools)
            if response_message.tool_calls:
                for tool_call in response_message.tool_calls:
                    tool_name = tool_call.function.name
                    tool_args = json.loads(tool_call.function.arguments)

                    # Execute tool via MCP client
                    tool_result_str = await self.mcp_client.call_tool(tool_name, tool_args)

                    # Feed the result back to the LLM
                    self.messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "name": tool_name,
                        "content": tool_result_str
                    })
                # After resolving tools, loop to let the LLM generate a final text response
            else:
                return response_message.content
```
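One detail worth noting: `self.messages` is instance state, so history accumulates across `chat()` calls and a single `UniLLM` is naturally multi-turn. A short sketch building only on the API above:

```python
import asyncio
from unimcp import UniClient, UniLLM

async def main():
    async with UniClient("http://localhost:8000/sse") as client:
        llm = UniLLM(client)  # model, key, and base URL fall back to env vars
        print(await llm.chat("Save my name as John."))
        # The second turn sees the first via llm.messages
        print(await llm.chat("What is my name?"))

asyncio.run(main())
```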
unimcp-0.1.0/tests/__init__.py
ADDED
@@ -0,0 +1 @@
```python
# Empty tests init
```