mcp-use 0.0.3__tar.gz → 0.0.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mcp-use might be problematic.
- mcp_use-0.0.4/.github/workflows/publish.yml +31 -0
- mcp_use-0.0.4/.github/workflows/tests.yml +31 -0
- mcp_use-0.0.4/.gitignore +124 -0
- mcp_use-0.0.4/.pre-commit-config.yaml +24 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/PKG-INFO +24 -36
- {mcp_use-0.0.3 → mcp_use-0.0.4}/README.md +2 -2
- mcp_use-0.0.4/examples/airbnb_mcp.json +8 -0
- mcp_use-0.0.4/examples/airbnb_use.py +49 -0
- mcp_use-0.0.4/examples/blender_use.py +51 -0
- mcp_use-0.0.4/examples/browser_mcp.json +11 -0
- mcp_use-0.0.4/examples/browser_use.py +44 -0
- mcp_use-0.0.4/examples/chat_examle.py +78 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/__init__.py +1 -1
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/agents/langchain_agent.py +46 -6
- mcp_use-0.0.4/mcp_use/agents/mcpagent.py +308 -0
- mcp_use-0.0.4/mcp_use/agents/prompts/default.py +11 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/client.py +44 -28
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/connectors/http.py +91 -7
- mcp_use-0.0.4/mcp_use/connectors/stdio.py +188 -0
- mcp_use-0.0.4/mcp_use/connectors/websocket.py +245 -0
- mcp_use-0.0.4/mcp_use/task_managers/__init__.py +18 -0
- mcp_use-0.0.4/mcp_use/task_managers/base.py +151 -0
- mcp_use-0.0.4/mcp_use/task_managers/http.py +62 -0
- mcp_use-0.0.4/mcp_use/task_managers/stdio.py +73 -0
- mcp_use-0.0.4/mcp_use/task_managers/websocket.py +63 -0
- mcp_use-0.0.4/pyproject.toml +83 -0
- mcp_use-0.0.4/tests/unit/test_placeholder.py +16 -0
- mcp_use-0.0.3/mcp_use/agents/mcpagent.py +0 -149
- mcp_use-0.0.3/mcp_use/connectors/stdio.py +0 -124
- mcp_use-0.0.3/mcp_use/connectors/websocket.py +0 -142
- mcp_use-0.0.3/mcp_use.egg-info/PKG-INFO +0 -368
- mcp_use-0.0.3/mcp_use.egg-info/SOURCES.txt +0 -23
- mcp_use-0.0.3/mcp_use.egg-info/dependency_links.txt +0 -1
- mcp_use-0.0.3/mcp_use.egg-info/requires.txt +0 -24
- mcp_use-0.0.3/mcp_use.egg-info/top_level.txt +0 -1
- mcp_use-0.0.3/pyproject.toml +0 -31
- mcp_use-0.0.3/setup.cfg +0 -4
- mcp_use-0.0.3/setup.py +0 -60
- {mcp_use-0.0.3 → mcp_use-0.0.4}/LICENSE +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/agents/__init__.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/agents/base.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/config.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/connectors/__init__.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/connectors/base.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/logging.py +0 -0
- {mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/session.py +0 -0
mcp_use-0.0.4/.github/workflows/publish.yml
ADDED

@@ -0,0 +1,31 @@
+name: Publish to PyPI
+
+on:
+  release:
+    types: [created]
+
+# Required for PyPI trusted publishing
+permissions:
+  id-token: write
+  contents: read
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.11"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install build twine wheel
+      - name: Build package
+        run: |
+          python -m build
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
mcp_use-0.0.4/.github/workflows/tests.yml
ADDED

@@ -0,0 +1,31 @@
+name: Python Tests
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.11", "3.12"]
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install .[dev,anthropic,openai]
+      - name: Lint with ruff
+        run: |
+          ruff check .
+      - name: Test with pytest
+        run: |
+          pytest
mcp_use-0.0.4/.gitignore
ADDED

@@ -0,0 +1,124 @@
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+Pipfile.lock
+
+# poetry
+poetry.lock
+
+# Environment variables
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# VS Code
+.vscode/
+*.code-workspace
+
+# PyCharm
+.idea/
+*.iml
+
+# macOS
+.DS_Store
mcp_use-0.0.4/.pre-commit-config.yaml
ADDED

@@ -0,0 +1,24 @@
+repos:
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.3.2
+    hooks:
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix, --config=pyproject.toml]
+        types: [python]
+      - id: ruff-format
+        args: [--config=pyproject.toml]
+        types: [python]
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: debug-statements
+
+# Define configuration for the Python checks
+default_language_version:
+  python: python3.11
{mcp_use-0.0.3 → mcp_use-0.0.4}/PKG-INFO

@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: mcp_use
-Version: 0.0.3
-Summary:
-
-
-
+Version: 0.0.4
+Summary: MCP Library for LLMs
+Author-email: Pietro Zullo <pietro.zullo@gmail.com>
+License: MIT
+License-File: LICENSE
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
@@ -14,40 +14,28 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.11
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: mcp
-Requires-Dist: langchain>=0.1.0
-Requires-Dist: langchain-community>=0.0.10
-Requires-Dist: websockets>=12.0
 Requires-Dist: aiohttp>=3.9.0
-Requires-Dist: pydantic>=2.0.0
-Requires-Dist: typing-extensions>=4.8.0
 Requires-Dist: jsonschema-pydantic>=0.1.0
+Requires-Dist: langchain-community>=0.0.10
+Requires-Dist: langchain>=0.1.0
+Requires-Dist: mcp
+Requires-Dist: pydantic>=2.0.0
 Requires-Dist: python-dotenv>=1.0.0
-
-Requires-Dist:
-Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
-Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
-Requires-Dist: black>=23.9.0; extra == "dev"
-Requires-Dist: isort>=5.12.0; extra == "dev"
-Requires-Dist: mypy>=1.5.0; extra == "dev"
-Requires-Dist: ruff>=0.1.0; extra == "dev"
+Requires-Dist: typing-extensions>=4.8.0
+Requires-Dist: websockets>=12.0
 Provides-Extra: anthropic
-Requires-Dist: anthropic>=0.15.0; extra ==
+Requires-Dist: anthropic>=0.15.0; extra == 'anthropic'
+Provides-Extra: dev
+Requires-Dist: black>=23.9.0; extra == 'dev'
+Requires-Dist: isort>=5.12.0; extra == 'dev'
+Requires-Dist: mypy>=1.5.0; extra == 'dev'
+Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
+Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
+Requires-Dist: pytest>=7.4.0; extra == 'dev'
+Requires-Dist: ruff>=0.1.0; extra == 'dev'
 Provides-Extra: openai
-Requires-Dist: openai>=1.10.0; extra ==
-
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
-Dynamic: license-file
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary
+Requires-Dist: openai>=1.10.0; extra == 'openai'
+Description-Content-Type: text/markdown

 <picture>
   <source media="(prefers-color-scheme: dark)" srcset="./static/mcp-use-dark.png">
@@ -55,7 +43,7 @@ Dynamic: summary
   <img alt="Shows a black MCP-Use Logo in light color mode and a white one in dark color mode." src="./static/mcp-use.png" width="full">
 </picture>

-<h1 align="center">
+<h1 align="center">Use MCPs directly from python 🤖</h1>

 [](https://github.com/pietrozullo/mcp-use/stargazers)

@@ -356,7 +344,7 @@ If you use MCP-Use in your research or project, please cite:
 ```bibtex
 @software{mcp_use2024,
   author = {Zullo, Pietro},
-  title = {MCP-Use:
+  title = {MCP-Use: MCP Library for Python},
   year = {2024},
   publisher = {GitHub},
   url = {https://github.com/pietrozullo/mcp-use}
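The metadata changes above are consistent with the packaging overhaul visible in the file list (setup.py, setup.cfg, and the egg-info directory removed; pyproject.toml rewritten), which is why the Dynamic: fields disappear and the extras are now declared statically. A minimal stdlib sketch for confirming the new metadata on an installed copy; it assumes mcp_use 0.0.4 is installed, and the expected values come from the PKG-INFO shown above:

```python
# Minimal sketch: read the installed package's metadata, which mirrors
# the PKG-INFO above. Assumes mcp_use 0.0.4 is installed.
from importlib.metadata import metadata

md = metadata("mcp_use")
print(md["Version"])                 # expected: 0.0.4
print(md["Summary"])                 # expected: MCP Library for LLMs
print(md.get_all("Provides-Extra"))  # expected: ['anthropic', 'dev', 'openai']
```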
{mcp_use-0.0.3 → mcp_use-0.0.4}/README.md

@@ -4,7 +4,7 @@
   <img alt="Shows a black MCP-Use Logo in light color mode and a white one in dark color mode." src="./static/mcp-use.png" width="full">
 </picture>

-<h1 align="center">
+<h1 align="center">Use MCPs directly from python 🤖</h1>

 [](https://github.com/pietrozullo/mcp-use/stargazers)

@@ -305,7 +305,7 @@ If you use MCP-Use in your research or project, please cite:
 ```bibtex
 @software{mcp_use2024,
   author = {Zullo, Pietro},
-  title = {MCP-Use:
+  title = {MCP-Use: MCP Library for Python},
   year = {2024},
   publisher = {GitHub},
   url = {https://github.com/pietrozullo/mcp-use}
mcp_use-0.0.4/examples/airbnb_use.py
ADDED

@@ -0,0 +1,49 @@
+"""
+Example demonstrating how to use mcp_use with Airbnb.
+
+This example shows how to connect an LLM to Airbnb through MCP tools
+to perform tasks like searching for accommodations.
+"""
+
+import asyncio
+import os
+
+from dotenv import load_dotenv
+from langchain_anthropic import ChatAnthropic
+
+from mcp_use import MCPAgent, MCPClient
+
+
+async def run_airbnb_example():
+    """Run an example using Airbnb MCP server."""
+    # Load environment variables
+    load_dotenv()
+
+    # Create MCPClient with Airbnb configuration
+    client = MCPClient.from_config_file(os.path.join(os.path.dirname(__file__), "airbnb_mcp.json"))
+    # Create LLM - you can choose between different models
+    llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
+    # Alternative models:
+    # llm = init_chat_model(model="llama-3.1-8b-instant", model_provider="groq")
+    # llm = ChatOpenAI(model="gpt-4o")
+
+    # Create agent with the client
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+
+    try:
+        # Run a query to search for accommodations
+        result = await agent.run(
+            "Find me a nice place to stay in Barcelona for 2 adults "
+            "for a week in August. I prefer places with a pool and "
+            "good reviews. Show me the top 3 options.",
+            max_steps=30,
+        )
+        print(f"\nResult: {result}")
+    finally:
+        # Ensure we clean up resources properly
+        if client.sessions:
+            await client.close_all_sessions()
+
+
+if __name__ == "__main__":
+    asyncio.run(run_airbnb_example())
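This script reads its server definition from airbnb_mcp.json, whose eight lines are not shown in this extraction. Going by the MCPClient.from_dict usage in the Blender example below, an equivalent inline configuration would be shaped like the following sketch; the command and args values here are hypothetical placeholders, not the actual contents of the file:

```python
# Hypothetical inline equivalent of airbnb_mcp.json; the "command" and
# "args" values are illustrative placeholders, not the file's contents.
from mcp_use import MCPClient

config = {
    "mcpServers": {
        "airbnb": {
            "command": "npx",  # hypothetical
            "args": ["-y", "@openbnb/mcp-server-airbnb"],  # hypothetical
        }
    }
}
client = MCPClient.from_dict(config)
```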
mcp_use-0.0.4/examples/blender_use.py
ADDED

@@ -0,0 +1,51 @@
+"""
+Blender MCP example for mcp_use.
+
+This example demonstrates how to use the mcp_use library with MCPClient
+to connect an LLM to Blender through MCP tools via WebSocket.
+The example assumes you have installed the Blender MCP addon from:
+https://github.com/gd3kr/BlenderGPT/tree/main/mcp_addon
+
+Make sure the addon is enabled in Blender preferences and the WebSocket
+server is running before executing this script.
+"""
+
+import asyncio
+
+from dotenv import load_dotenv
+from langchain_anthropic import ChatAnthropic
+
+from mcp_use import MCPAgent, MCPClient
+
+
+async def run_blender_example():
+    """Run the Blender MCP example."""
+    # Load environment variables
+    load_dotenv()
+
+    # Create MCPClient with Blender MCP configuration
+    config = {"mcpServers": {"blender": {"command": "uvx", "args": ["blender-mcp"]}}}
+    client = MCPClient.from_dict(config)
+
+    # Create LLM
+    llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
+
+    # Create agent with the client
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+
+    try:
+        # Run the query
+        result = await agent.run(
+            "Create an inflatable cube with soft material and a plane as ground.",
+            max_steps=30,
+        )
+        print(f"\nResult: {result}")
+    finally:
+        # Ensure we clean up resources properly
+        if client.sessions:
+            await client.close_all_sessions()
+
+
+if __name__ == "__main__":
+    # Run the Blender example
+    asyncio.run(run_blender_example())
mcp_use-0.0.4/examples/browser_use.py
ADDED

@@ -0,0 +1,44 @@
+"""
+Basic usage example for mcp_use.
+
+This example demonstrates how to use the mcp_use library with MCPClient
+to connect any LLM to MCP tools through a unified interface.
+"""
+
+import asyncio
+import os
+
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from mcp_use import MCPAgent, MCPClient
+
+
+async def main():
+    """Run the example using a configuration file."""
+    # Load environment variables
+    load_dotenv()
+
+    # Create MCPClient from config file
+    client = MCPClient.from_config_file(os.path.join(os.path.dirname(__file__), "browser_mcp.json"))
+
+    # Create LLM
+    llm = ChatOpenAI(model="gpt-4o")
+    # llm = init_chat_model(model="llama-3.1-8b-instant", model_provider="groq")
+    # llm = ChatAnthropic(model="claude-3-")
+    # llm = ChatGroq(model="llama3-8b-8192")
+
+    # Create agent with the client
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+
+    # Run the query
+    result = await agent.run(
+        "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
+        max_steps=30,
+    )
+    print(f"\nResult: {result}")
+
+
+if __name__ == "__main__":
+    # Run the appropriate example
+    asyncio.run(main())
mcp_use-0.0.4/examples/chat_examle.py
ADDED

@@ -0,0 +1,78 @@
+"""
+Simple chat example using MCPAgent with built-in conversation memory.
+
+This example demonstrates how to use the MCPAgent with its built-in
+conversation history capabilities for better contextual interactions.
+"""
+
+import asyncio
+
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from mcp_use import MCPAgent, MCPClient
+
+
+async def run_memory_chat():
+    """Run a chat using MCPAgent's built-in conversation memory."""
+    # Load environment variables for API keys
+    load_dotenv()
+
+    # Config file path - change this to your config file
+    config_file = "examples/browser_mcp.json"
+
+    print("Initializing chat...")
+
+    # Create MCP client and agent with memory enabled
+    client = MCPClient.from_config_file(config_file)
+    llm = ChatOpenAI(model="gpt-4o-mini")
+
+    # Create agent with memory_enabled=True
+    agent = MCPAgent(
+        llm=llm,
+        client=client,
+        max_steps=15,
+        memory_enabled=True,  # Enable built-in conversation memory
+    )
+
+    print("\n===== Interactive MCP Chat =====")
+    print("Type 'exit' or 'quit' to end the conversation")
+    print("Type 'clear' to clear conversation history")
+    print("==================================\n")
+
+    try:
+        # Main chat loop
+        while True:
+            # Get user input
+            user_input = input("\nYou: ")
+
+            # Check for exit command
+            if user_input.lower() in ["exit", "quit"]:
+                print("Ending conversation...")
+                break
+
+            # Check for clear history command
+            if user_input.lower() == "clear":
+                agent.clear_conversation_history()
+                print("Conversation history cleared.")
+                continue
+
+            # Get response from agent
+            print("\nAssistant: ", end="", flush=True)
+
+            try:
+                # Run the agent with the user input (memory handling is automatic)
+                response = await agent.run(user_input)
+                print(response)
+
+            except Exception as e:
+                print(f"\nError: {e}")
+
+    finally:
+        # Clean up
+        if client and client.sessions:
+            await client.close_all_sessions()
+
+
+if __name__ == "__main__":
+    asyncio.run(run_memory_chat())
{mcp_use-0.0.3 → mcp_use-0.0.4}/mcp_use/agents/langchain_agent.py

@@ -1,8 +1,8 @@
 """
-LangChain agent implementation for MCP tools.
+LangChain agent implementation for MCP tools with customizable system message.

 This module provides a LangChain agent implementation that can use MCP tools
-through a unified interface.
+through a unified interface, with support for customizable system messages.
 """

 from typing import Any, NoReturn
@@ -73,8 +73,15 @@ class LangChainAgent:
     through a unified interface.
     """

+    # Default system message if none is provided
+    DEFAULT_SYSTEM_MESSAGE = "You are a helpful AI assistant that can use tools to help users."
+
     def __init__(
-        self,
+        self,
+        connector: BaseConnector,
+        llm: BaseLanguageModel,
+        max_steps: int = 5,
+        system_message: str | None = None,
     ) -> None:
         """Initialize a new LangChain agent.

@@ -82,13 +89,28 @@ class LangChainAgent:
             connector: The MCP connector to use.
             llm: The LangChain LLM to use.
             max_steps: The maximum number of steps to take.
+            system_message: Optional custom system message to use.
         """
         self.connector = connector
         self.llm = llm
         self.max_steps = max_steps
+        self.system_message = system_message or self.DEFAULT_SYSTEM_MESSAGE
         self.tools: list[BaseTool] = []
         self.agent: AgentExecutor | None = None

+    def set_system_message(self, message: str) -> None:
+        """Set a new system message and recreate the agent.
+
+        Args:
+            message: The new system message.
+        """
+        self.system_message = message
+
+        # Recreate the agent with the new system message if it exists
+        if self.agent and self.tools:
+            self.agent = self._create_agent()
+            logger.info("Agent recreated with new system message")
+
     async def initialize(self) -> None:
         """Initialize the agent and its tools."""
         self.tools = await self._create_langchain_tools()
@@ -184,7 +206,7 @@ class LangChainAgent:
         return langchain_tools

     def _create_agent(self) -> AgentExecutor:
-        """Create the LangChain agent.
+        """Create the LangChain agent with the configured system message.

         Returns:
             An initialized AgentExecutor.
@@ -193,7 +215,7 @@ class LangChainAgent:
             [
                 (
                     "system",
-
+                    self.system_message,
                 ),
                 MessagesPlaceholder(variable_name="chat_history"),
                 ("human", "{input}"),
@@ -234,7 +256,25 @@ class LangChainAgent:
         if chat_history is None:
             chat_history = []

+        # Add a hint to use tools for queries about current information
+        enhanced_query = query
+        if any(
+            keyword in query.lower()
+            for keyword in [
+                "weather",
+                "current",
+                "today",
+                "now",
+                "latest",
+                "news",
+                "price",
+                "stock",
+            ]
+        ):
+            # Just log this, don't modify the query
+            logger.info("Query involves current information that may benefit from tool use")
+
         # Invoke with all required variables
-        result = await self.agent.ainvoke({"input":
+        result = await self.agent.ainvoke({"input": enhanced_query, "chat_history": chat_history})

         return result["output"]
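Taken together, the new constructor parameters and set_system_message make the system prompt swappable after construction, with the executor rebuilt in place. A rough usage sketch follows; the import path comes from the file list above, but the connector construction and the run(query) entry point are assumptions inferred from this diff rather than confirmed by it:

```python
# Rough usage sketch for the updated LangChainAgent. The connector
# setup and the run() method name are assumptions, not confirmed here.
import asyncio

from langchain_openai import ChatOpenAI

from mcp_use.agents.langchain_agent import LangChainAgent


async def main():
    connector = ...  # a BaseConnector (stdio/http/websocket); construction not shown in this diff
    agent = LangChainAgent(
        connector=connector,
        llm=ChatOpenAI(model="gpt-4o"),
        max_steps=5,
        system_message="You are a terse assistant that prefers tools.",
    )
    await agent.initialize()

    # Swapping the system message recreates the executor in place.
    agent.set_system_message("You are a verbose, step-by-step assistant.")
    result = await agent.run("What's the latest news about MCP?")
    print(result)


asyncio.run(main())
```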