langchain-mcp-tools 0.0.1 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_mcp_tools-0.0.1/LICENSE +21 -0
- langchain_mcp_tools-0.0.1/PKG-INFO +93 -0
- langchain_mcp_tools-0.0.1/README.md +75 -0
- langchain_mcp_tools-0.0.1/pyproject.toml +25 -0
- langchain_mcp_tools-0.0.1/setup.cfg +4 -0
- langchain_mcp_tools-0.0.1/src/cli_chat.py +274 -0
- langchain_mcp_tools-0.0.1/src/config_loader.py +39 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/PKG-INFO +93 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/SOURCES.txt +12 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/dependency_links.txt +1 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/requires.txt +10 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/top_level.txt +3 -0
- langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.py +278 -0
- langchain_mcp_tools-0.0.1/tests/test_langchain_mcp_tools.py +163 -0
langchain_mcp_tools-0.0.1/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 hideya kawahara

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

langchain_mcp_tools-0.0.1/PKG-INFO
@@ -0,0 +1,93 @@
Metadata-Version: 2.2
Name: langchain-mcp-tools
Version: 0.0.1
Summary: MCP Client Using LangChain / Python
Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: jsonschema-pydantic>=0.6
Requires-Dist: langchain>=0.3.14
Requires-Dist: langchain-anthropic>=0.3.1
Requires-Dist: langchain-groq>=0.2.3
Requires-Dist: langchain-openai>=0.3.0
Requires-Dist: langgraph>=0.2.62
Requires-Dist: mcp>=1.2.0
Requires-Dist: pyjson5>=1.6.8
Requires-Dist: pympler>=1.1
Requires-Dist: python-dotenv>=1.0.1

(The long description following the metadata embeds README.md verbatim; see langchain_mcp_tools-0.0.1/README.md below.)

langchain_mcp_tools-0.0.1/README.md
@@ -0,0 +1,75 @@
# MCP Client Using LangChain / Python [License](https://github.com/hideya/mcp-langchain-client-ts/blob/main/LICENSE)

This simple [Model Context Protocol (MCP)](https://modelcontextprotocol.io/)
client demonstrates MCP server invocations by a LangChain ReAct agent.

It leverages the utility function `convert_mcp_to_langchain_tools()` from
`langchain_mcp_tools`.
This function handles parallel initialization of multiple specified MCP servers
and converts their available tools into a list of
[LangChain-compatible tools](https://js.langchain.com/docs/how_to/tool_calling/).

LLMs from Anthropic, OpenAI, and Groq are currently supported.

A TypeScript version of this MCP client is available
[here](https://github.com/hideya/mcp-client-langchain-ts).

## Requirements

- Python 3.11+
- [`uv`](https://docs.astral.sh/uv/) installed
- API keys from [Anthropic](https://console.anthropic.com/settings/keys),
  [OpenAI](https://platform.openai.com/api-keys), and/or
  [Groq](https://console.groq.com/keys),
  as needed

## Setup

1. Install dependencies:
   ```bash
   make install
   ```

2. Set up API keys:
   ```bash
   cp .env.template .env
   ```
   - Update `.env` as needed.
   - `.gitignore` is configured to ignore `.env`
     to prevent accidental commits of the credentials.

3. Configure the LLM and MCP server settings in `llm_mcp_config.json5` as needed
   (a sketch of the resulting structure is shown after this file).

   - [The configuration file format](https://github.com/hideya/mcp-client-langchain-ts/blob/main/llm_mcp_config.json5)
     for MCP servers follows the same structure as
     [Claude for Desktop](https://modelcontextprotocol.io/quickstart/user),
     with one difference: the key name `mcpServers` has been changed
     to `mcp_servers` to follow the snake_case convention
     commonly used in JSON configuration files.
   - The file format is [JSON5](https://json5.org/),
     which allows comments and trailing commas.
   - The format is further extended to replace `${...}` notations
     with the values of the corresponding environment variables.
   - Keep all credentials and private info in the `.env` file
     and refer to them with the `${...}` notation as needed.

## Usage

Run the app:
```bash
make start
```

Run in verbose mode:
```bash
make start-v
```

See command-line options:
```bash
make start-h
```

At the prompt, you can simply press Enter to use example queries that perform MCP server tool invocations.

Example queries can be configured in `llm_mcp_config.json5`.

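For concreteness, here is a rough sketch of the structure `llm_mcp_config.json5` describes, expressed as the Python dict that `load_config()` (see `src/config_loader.py` below) would return after JSON5 parsing. Every concrete value (the model name, the example query, and the filesystem server entry) is an illustrative assumption, not something shipped with the package:

```python
# A sketch of the dict load_config('llm_mcp_config.json5') might return.
# All concrete values below are illustrative assumptions.
example_config = {
    "llm": {
        # Keys consumed by init_chat_model() in src/cli_chat.py
        "model_provider": "anthropic",
        "model": "claude-3-5-haiku-latest",  # assumed model name
        "temperature": 0.0,
        "max_tokens": 1000,
        "system_prompt": "You are a helpful assistant.",
    },
    "example_queries": [
        "Summarize the file README.md",  # assumed example query
    ],
    # Note: snake_case `mcp_servers`, not `mcpServers`
    "mcp_servers": {
        "filesystem": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "."],
            # In the JSON5 file, secrets would be written as "${SOME_VAR}"
            # and resolved from .env
        },
    },
}
```
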
langchain_mcp_tools-0.0.1/pyproject.toml
@@ -0,0 +1,25 @@
[project]
name = "langchain-mcp-tools"
version = "0.0.1"
description = "MCP Client Using LangChain / Python"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "jsonschema-pydantic>=0.6",
    "langchain>=0.3.14",
    "langchain-anthropic>=0.3.1",
    "langchain-groq>=0.2.3",
    "langchain-openai>=0.3.0",
    "langgraph>=0.2.62",
    "mcp>=1.2.0",
    "pyjson5>=1.6.8",
    "pympler>=1.1",
    "python-dotenv>=1.0.1",
]

[dependency-groups]
dev = [
    "pytest>=8.3.4",
    "pytest-asyncio>=0.25.2",
    "twine>=6.0.1",
]

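The dependencies above support both the bundled CLI and direct library use. As a minimal sketch of the latter (assuming `npx` and the illustrative filesystem server from the config example above are available), the conversion function can be driven directly:

```python
import asyncio

from langchain_mcp_tools import convert_mcp_to_langchain_tools


async def main() -> None:
    # Same shape as the `mcp_servers` section of llm_mcp_config.json5;
    # the filesystem server entry is an illustrative assumption.
    server_configs = {
        "filesystem": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "."],
        },
    }
    tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)
    try:
        print([tool.name for tool in tools])  # the converted LangChain tools
    finally:
        # Ask every server task to close its session, and wait for it
        await cleanup()


asyncio.run(main())
```
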
langchain_mcp_tools-0.0.1/src/cli_chat.py
@@ -0,0 +1,274 @@
# Standard library imports
import argparse
import asyncio
from enum import Enum
import json
import logging
import sys
from pathlib import Path
from typing import (
    List,
    Optional,
    Dict,
    Any,
    cast,
)

# Third-party imports
try:
    from dotenv import load_dotenv
    from langchain.chat_models import init_chat_model
    from langchain.schema import (
        AIMessage,
        BaseMessage,
        HumanMessage,
        SystemMessage,
    )
    from langchain_core.runnables.base import Runnable
    from langchain_core.messages.tool import ToolMessage
    from langgraph.prebuilt import create_react_agent
except ImportError as e:
    print(f'\nError: Required package not found: {e}')
    print('Please ensure all required packages are installed\n')
    sys.exit(1)

# Local application imports
from config_loader import load_config
from langchain_mcp_tools import (
    convert_mcp_to_langchain_tools,
    McpServerCleanupFn,
)

# Type definitions
ConfigType = Dict[str, Any]


# ANSI color escape codes
class Colors(str, Enum):
    YELLOW = '\033[33m'  # set color to yellow
    CYAN = '\033[36m'    # set color to cyan
    RESET = '\033[0m'    # reset color

    def __str__(self):
        return self.value


def parse_arguments() -> argparse.Namespace:
    """Parse and return command line args for config path and verbosity."""
    parser = argparse.ArgumentParser(
        description='CLI Chat Application',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '-c', '--config',
        default='llm_mcp_config.json5',
        help='path to config file',
        type=Path,
        metavar='PATH'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='run with verbose logging'
    )
    return parser.parse_args()


def init_logger(verbose: bool) -> logging.Logger:
    """Initialize and return a logger with the appropriate verbosity level."""
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format='\x1b[90m[%(levelname)s]\x1b[0m %(message)s'
    )
    return logging.getLogger()


def print_colored(text: str, color: Colors, end: str = "\n") -> None:
    """Print text in the specified color and reset afterwards."""
    print(f"{color}{text}{Colors.RESET}", end=end)


def set_color(color: Colors) -> None:
    """Set the terminal color."""
    print(color, end='')


def clear_line() -> None:
    """Move up one line and clear it."""
    print('\x1b[1A\x1b[2K', end='')


async def get_user_query(remaining_queries: List[str]) -> Optional[str]:
    """Get user input or the next example query, handling empty inputs
    and quit commands."""
    set_color(Colors.YELLOW)
    query = input('Query: ').strip()

    if len(query) == 0:
        if len(remaining_queries) > 0:
            query = remaining_queries.pop(0)
            clear_line()
            print_colored(f'Example Query: {query}', Colors.YELLOW)
        else:
            set_color(Colors.RESET)
            print('\nPlease type a query, or "quit" or "q" to exit\n')
            return await get_user_query(remaining_queries)

    print(Colors.RESET)  # Reset after input

    if query.lower() in ['quit', 'q']:
        print_colored('Goodbye!\n', Colors.CYAN)
        return None

    return query


async def handle_conversation(
    agent: Runnable,
    messages: List[BaseMessage],
    example_queries: List[str],
    verbose: bool
) -> None:
    """Manage an interactive conversation loop between the user and AI agent.

    Args:
        agent (Runnable): The initialized ReAct agent that processes queries
        messages (List[BaseMessage]): List to maintain conversation history
        example_queries (List[str]): List of example queries that can be used
            when the user presses Enter
        verbose (bool): Flag to control detailed output of tool responses

    Exception handling:
        - TypeError: Ensures the response is in the expected string format
        - General exceptions: Allow the conversation to continue after errors

    The conversation continues until the user types 'quit' or 'q'.
    """
    print('\nConversation started. '
          'Type "quit" or "q" to end the conversation.\n')
    if len(example_queries) > 0:
        print('Example Queries (just press Enter to supply them one by one):')
        for ex_q in example_queries:
            print(f"- {ex_q}")
        print()

    while True:
        try:
            query = await get_user_query(example_queries)
            if not query:
                break

            messages.append(HumanMessage(content=query))

            result = await agent.ainvoke({
                'messages': messages
            })

            result_messages = cast(List[BaseMessage], result['messages'])
            # the last message should be an AIMessage
            response = result_messages[-1].content
            if not isinstance(response, str):
                raise TypeError(
                    f"Expected string response, got {type(response)}"
                )

            # check if the message one before is a ToolMessage
            message_one_before = result_messages[-2]
            if isinstance(message_one_before, ToolMessage):
                if verbose:
                    # show the tool call response
                    print(message_one_before.content)
                # new line after tool call output
                print()
            print_colored(f"{response}\n", Colors.CYAN)
            messages.append(AIMessage(content=response))

        except Exception as e:
            print(f'Error getting response: {str(e)}')
            print('You can continue chatting or type "quit" to exit.')


async def init_react_agent(
    config: ConfigType,
    logger: logging.Logger
) -> tuple[Runnable, List[BaseMessage], McpServerCleanupFn]:
    """Initialize and configure a ReAct agent for conversation handling.

    Args:
        config (ConfigType): Configuration dictionary containing LLM and
            MCP server settings
        logger (logging.Logger): Logger instance for initialization
            status updates

    Returns:
        tuple[Runnable, List[BaseMessage], McpServerCleanupFn]:
        A tuple containing:
        - Configured ReAct agent ready for conversation
        - Initial message list (empty or with system prompt)
        - Cleanup function for MCP server connections
    """
    llm_config = config['llm']
    logger.info(f'Initializing model... {json.dumps(llm_config, indent=2)}\n')

    llm = init_chat_model(
        model=llm_config['model'],
        model_provider=llm_config['model_provider'],
        temperature=llm_config['temperature'],
        max_tokens=llm_config['max_tokens'],
    )

    mcp_configs = config['mcp_servers']
    logger.info(f'Initializing {len(mcp_configs)} MCP server(s)...\n')
    tools, mcp_cleanup = await convert_mcp_to_langchain_tools(
        mcp_configs,
        logger
    )

    agent = create_react_agent(
        llm,
        tools
    )

    messages: List[BaseMessage] = []
    system_prompt = llm_config.get('system_prompt')
    if system_prompt and isinstance(system_prompt, str):
        messages.append(SystemMessage(content=system_prompt))

    return agent, messages, mcp_cleanup


async def run() -> None:
    """Main async function to set up and run the simple chat app."""
    mcp_cleanup: Optional[McpServerCleanupFn] = None
    try:
        load_dotenv()
        args = parse_arguments()
        logger = init_logger(args.verbose)
        config = load_config(args.config)
        example_queries = (
            config.get('example_queries')[:]
            if config.get('example_queries') is not None
            else []
        )

        agent, messages, mcp_cleanup = await init_react_agent(config, logger)

        await handle_conversation(
            agent,
            messages,
            example_queries,
            args.verbose
        )

    finally:
        if mcp_cleanup is not None:
            await mcp_cleanup()


def main() -> None:
    """Entry point of the script."""
    asyncio.run(run())


if __name__ == '__main__':
    main()

langchain_mcp_tools-0.0.1/src/config_loader.py
@@ -0,0 +1,39 @@
import pyjson5 as json5
from pathlib import Path
from typing import TypedDict, Optional, Any


class LLMConfig(TypedDict):
    """Type definition for LLM configuration."""
    model_provider: str
    model: Optional[str]
    temperature: Optional[float]
    system_prompt: Optional[str]


class ConfigError(Exception):
    """Base exception for configuration related errors."""
    pass


class ConfigFileNotFoundError(ConfigError):
    """Raised when the configuration file cannot be found."""
    pass


class ConfigValidationError(ConfigError):
    """Raised when the configuration fails validation."""
    pass


def load_config(config_path: str) -> dict[str, Any]:
    """Load and validate configuration from a JSON5 file."""
    config_file = Path(config_path)
    if not config_file.exists():
        raise ConfigFileNotFoundError(f"Config file {config_path} not found")

    with open(config_file, 'r', encoding='utf-8') as f:
        config: dict[str, Any] = json5.load(f)

    return config

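Note that the README describes a format extension that replaces `${...}` notations with environment-variable values, which `load_config()` above does not yet perform. A minimal sketch of how such expansion could be layered on top (the function name and regex are assumptions, not part of the package):

```python
import os
import re


def expand_env_vars(value):
    """Recursively replace ${VAR} notations with os.environ values.

    A sketch only: unset variables are left as-is here, though raising
    a ConfigValidationError would also be a reasonable choice.
    """
    if isinstance(value, str):
        return re.sub(
            r'\$\{(\w+)\}',
            lambda m: os.environ.get(m.group(1), m.group(0)),
            value,
        )
    if isinstance(value, dict):
        return {k: expand_env_vars(v) for k, v in value.items()}
    if isinstance(value, list):
        return [expand_env_vars(v) for v in value]
    return value
```
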
langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/PKG-INFO
@@ -0,0 +1,93 @@
(Byte-for-byte identical to langchain_mcp_tools-0.0.1/PKG-INFO above.)

langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/SOURCES.txt
@@ -0,0 +1,12 @@
LICENSE
README.md
pyproject.toml
src/cli_chat.py
src/config_loader.py
src/langchain_mcp_tools.py
src/langchain_mcp_tools.egg-info/PKG-INFO
src/langchain_mcp_tools.egg-info/SOURCES.txt
src/langchain_mcp_tools.egg-info/dependency_links.txt
src/langchain_mcp_tools.egg-info/requires.txt
src/langchain_mcp_tools.egg-info/top_level.txt
tests/test_langchain_mcp_tools.py

langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.egg-info/dependency_links.txt
@@ -0,0 +1 @@
(a single empty line)

langchain_mcp_tools-0.0.1/src/langchain_mcp_tools.py
@@ -0,0 +1,278 @@
# Standard library imports
import asyncio
import logging
import os
import sys
from contextlib import AsyncExitStack, asynccontextmanager
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    List,
    NoReturn,
    Tuple,
    Type,
)

# Third-party imports
try:
    from jsonschema_pydantic import jsonschema_to_pydantic  # type: ignore
    from langchain_core.tools import BaseTool, ToolException
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client
    from pydantic import BaseModel
    from pympler import asizeof
except ImportError as e:
    print(f'\nError: Required package not found: {e}')
    print('Please ensure all required packages are installed\n')
    sys.exit(1)


"""
Resource Management Pattern for Parallel Server Initialization
--------------------------------------------------------------
This code implements a specific pattern for managing async resources that
require context managers while enabling parallel initialization.
The key aspects are:

1. Core Challenge:
   - Managing async resources (stdio_client and ClientSession) that seem to
     rely exclusively on asynccontextmanager for cleanup, with no manual
     cleanup options (based on the mcp python-sdk impl as of Jan 14, 2025,
     #62a0af6)
   - Initializing multiple servers in parallel
   - Keeping sessions alive for later use
   - Ensuring proper cleanup in the same task that created them

2. Solution Strategy:
   A key requirement for parallel initialization is that each server must be
   initialized in its own dedicated task - there's no way around this if we
   want true parallel initialization. However, this creates a challenge since
   we also need to maintain long-lived sessions and handle cleanup properly.

   The key insight is to keep the initialization tasks alive throughout the
   session lifetime, rather than letting them complete after initialization.
   By using events for coordination, we can:
   - Allow parallel initialization while maintaining proper context management
   - Keep each initialization task running until explicit cleanup is requested
   - Ensure cleanup occurs in the same task that created the resources
   - Provide a clean interface for the caller to manage the lifecycle

   Alternative Considered:
   A generator/coroutine approach using a 'finally' block for cleanup was
   considered but rejected because:
   - The 'finally' block in a generator/coroutine can be executed by a
     different task than the one that ran the main body of the code
   - This breaks the requirement that AsyncExitStack.aclose() must be
     called from the same task that created the context

3. Task Lifecycle:
   To allow the initialization task to stay alive waiting for cleanup:

   [Task starts]
        ↓
   Initialize server & convert tools
        ↓
   Set ready_event (signals tools are ready)
        ↓
   await cleanup_event.wait() (keeps task alive)
        ↓
   When cleanup_event is set:
   exit_stack.aclose() (cleanup in the original task)

This pattern enables parallel initialization while maintaining proper async
resource lifecycle management through context managers.
"""


async def spawn_mcp_server_tools_task(
    server_name: str,
    server_config: Dict[str, Any],
    langchain_tools: List[BaseTool],
    ready_event: asyncio.Event,
    cleanup_event: asyncio.Event,
    logger: logging.Logger = logging.getLogger(__name__)
) -> None:
    """Convert MCP server tools to LangChain compatible tools
    and manage their lifecycle.

    This task initializes an MCP server connection, converts its tools
    to LangChain format, and manages the connection lifecycle.
    It adds the tools to the provided langchain_tools list and uses events
    for synchronization.

    Args:
        server_name: Name of the MCP server
        server_config: Server configuration dictionary containing command,
            args, and env
        langchain_tools: List to which the converted LangChain tools will
            be appended
        ready_event: Event to signal when tools are ready for use
        cleanup_event: Event to trigger cleanup and connection closure
        logger: Logger instance to use for logging events and errors.
            Defaults to the module logger.

    Returns:
        None

    Raises:
        Exception: If there's an error in server connection or tool conversion
    """
    try:
        logger.info(f'MCP server "{server_name}": '
                    f'initializing with: {server_config}')

        # NOTE: `uv` and `npx` seem to require PATH to be set.
        # To avoid confusion, it was decided to automatically append it
        # to the env if not explicitly set by the config.
        env = dict(server_config.get('env', {}))
        if 'PATH' not in env:
            env['PATH'] = os.environ.get('PATH', '')

        server_params = StdioServerParameters(
            command=server_config['command'],
            args=server_config.get('args', []),
            env=env
        )

        # Wrap a context manager so that a message is logged just before
        # its __aexit__ runs.
        @asynccontextmanager
        async def log_before_aexit(context_manager, message):
            yield await context_manager.__aenter__()
            logger.info(message)
            await context_manager.__aexit__(None, None, None)

        exit_stack = AsyncExitStack()

        stdio_transport = await exit_stack.enter_async_context(
            stdio_client(server_params)
        )
        read, write = stdio_transport

        session = await exit_stack.enter_async_context(
            log_before_aexit(
                ClientSession(read, write),
                f'MCP server "{server_name}": session closed'
            )
        )

        await session.initialize()
        logger.info(f'MCP server "{server_name}": connected')

        tools_response = await session.list_tools()

        for tool in tools_response.tools:
            class McpToLangChainAdapter(BaseTool):
                name: str = tool.name or 'NO NAME'
                description: str = tool.description or ''
                args_schema: Type[BaseModel] = jsonschema_to_pydantic(
                    tool.inputSchema
                )

                def _run(self, **kwargs: Any) -> NoReturn:
                    raise NotImplementedError(
                        'Only async operation is supported'
                    )

                async def _arun(self, **kwargs: Any) -> Any:
                    logger.info(f'MCP tool "{server_name}"/"{tool.name}" '
                                f'received input: {kwargs}')
                    result = await session.call_tool(self.name, kwargs)
                    if result.isError:
                        raise ToolException(result.content)

                    size = asizeof.asizeof(result.content)
                    logger.info(f'MCP tool "{server_name}"/"{tool.name}" '
                                f'received result (size: {size})')
                    return result.content

            langchain_tools.append(McpToLangChainAdapter())

        logger.info(f'MCP server "{server_name}": {len(langchain_tools)} '
                    f'tool(s) available:')
        for tool in langchain_tools:
            logger.info(f'- {tool.name}')
    except Exception as e:
        logger.error(f'Error during MCP server initialization: {str(e)}')
        raise

    ready_event.set()

    await cleanup_event.wait()

    await exit_stack.aclose()


McpServerCleanupFn = Callable[[], Awaitable[None]]


async def convert_mcp_to_langchain_tools(
    server_configs: Dict[str, Dict[str, Any]],
    logger: logging.Logger = logging.getLogger(__name__)
) -> Tuple[List[BaseTool], McpServerCleanupFn]:
    """Initialize multiple MCP servers and convert their tools to
    LangChain format.

    This async function manages parallel initialization of multiple MCP
    servers, converts their tools to LangChain format, and provides a cleanup
    mechanism. It orchestrates the full lifecycle of multiple servers.

    Args:
        server_configs: Dictionary mapping server names to their
            configurations, where each configuration contains command, args,
            and env settings
        logger: Logger instance to use for logging events and errors.
            Defaults to the module logger.

    Returns:
        A tuple containing:
        - List of converted LangChain tools from all servers
        - Async cleanup function to properly shut down all server
          connections

    Example:
        server_configs = {
            "server1": {"command": "npm", "args": ["start"]},
            "server2": {"command": "./server", "args": ["-p", "8000"]}
        }
        tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)
        # Use tools...
        await cleanup()
    """
    per_server_tools = []
    ready_event_list = []
    cleanup_event_list = []

    tasks = []
    for server_name, server_config in server_configs.items():
        server_tools_accumulator: List[BaseTool] = []
        per_server_tools.append(server_tools_accumulator)
        ready_event = asyncio.Event()
        ready_event_list.append(ready_event)
        cleanup_event = asyncio.Event()
        cleanup_event_list.append(cleanup_event)
        task = asyncio.create_task(spawn_mcp_server_tools_task(
            server_name,
            server_config,
            server_tools_accumulator,
            ready_event,
            cleanup_event,
            logger
        ))
        tasks.append(task)

    for ready_event in ready_event_list:
        await ready_event.wait()

    langchain_tools = [
        item for sublist in per_server_tools for item in sublist
    ]

    async def mcp_cleanup() -> None:
        for cleanup_event in cleanup_event_list:
            cleanup_event.set()
        # Wait until every server task has actually closed its resources
        await asyncio.gather(*tasks)

    logger.info(f'MCP servers initialized: {len(langchain_tools)} tool(s) '
                f'available in total')
    for tool in langchain_tools:
        logger.debug(f'- {tool.name}')

    return langchain_tools, mcp_cleanup

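The task lifecycle described in the module docstring above can be distilled into a standalone toy: each resource is opened and closed inside one long-lived task, with two events coordinating readiness and shutdown. Everything here is illustrative scaffolding, not part of the package API:

```python
import asyncio
from contextlib import AsyncExitStack, asynccontextmanager


@asynccontextmanager
async def fake_resource(name: str):
    print(f'{name}: opened')
    yield name
    print(f'{name}: closed')  # must run in the task that opened it


async def worker(name: str, ready: asyncio.Event, cleanup: asyncio.Event):
    # Open the resource via an exit stack owned by this task
    stack = AsyncExitStack()
    await stack.enter_async_context(fake_resource(name))
    ready.set()            # signal the caller that init is done
    await cleanup.wait()   # stay alive until cleanup is requested
    await stack.aclose()   # close in the same task that opened it


async def main():
    names = ['server1', 'server2']
    readies = [asyncio.Event() for _ in names]
    cleanups = [asyncio.Event() for _ in names]
    tasks = [
        asyncio.create_task(worker(n, r, c))
        for n, r, c in zip(names, readies, cleanups)
    ]
    for r in readies:
        await r.wait()     # all resources initialized in parallel
    print('using resources...')
    for c in cleanups:
        c.set()            # request cleanup
    await asyncio.gather(*tasks)


asyncio.run(main())
```
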
langchain_mcp_tools-0.0.1/tests/test_langchain_mcp_tools.py
@@ -0,0 +1,163 @@
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from langchain_core.tools import BaseTool
from src.langchain_mcp_tools import (
    convert_mcp_to_langchain_tools,
)

# Fix the asyncio mark warning by installing pytest-asyncio
pytest_plugins = ('pytest_asyncio',)


@pytest.fixture
def mock_stdio_client():
    with patch('src.langchain_mcp_tools.stdio_client') as mock:
        mock.return_value.__aenter__.return_value = (AsyncMock(), AsyncMock())
        yield mock


@pytest.fixture
def mock_client_session():
    with patch('src.langchain_mcp_tools.ClientSession') as mock:
        session = AsyncMock()
        # Mock the list_tools response.
        # NOTE: `name` cannot be set via the MagicMock constructor
        # (it is consumed as the mock's own name), so set it afterwards.
        mock_tool = MagicMock(
            description="Test tool",
            inputSchema={"type": "object", "properties": {}}
        )
        mock_tool.name = "tool1"
        session.list_tools.return_value = MagicMock(tools=[mock_tool])
        mock.return_value.__aenter__.return_value = session
        yield mock


@pytest.mark.asyncio
async def test_convert_mcp_to_langchain_tools_empty():
    server_configs = {}
    tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)
    assert isinstance(tools, list)
    assert len(tools) == 0
    await cleanup()


"""
@pytest.mark.asyncio
async def test_convert_mcp_to_langchain_tools_invalid_config():
    server_configs = {"invalid": {"command": "nonexistent"}}
    with pytest.raises(Exception):
        await convert_mcp_to_langchain_tools(server_configs)
"""


"""
@pytest.mark.asyncio
async def test_convert_single_mcp_success(
    mock_stdio_client,
    mock_client_session
):
    # Test data
    server_name = "test_server"
    server_config = {
        "command": "test_command",
        "args": ["--test"],
        "env": {"TEST_ENV": "value"}
    }
    langchain_tools = []
    ready_event = asyncio.Event()
    cleanup_event = asyncio.Event()

    # Create task
    task = asyncio.create_task(
        convert_single_mcp_to_langchain_tools(
            server_name,
            server_config,
            langchain_tools,
            ready_event,
            cleanup_event
        )
    )

    # Wait for ready event
    await asyncio.wait_for(ready_event.wait(), timeout=1.0)

    # Verify tools were created
    assert len(langchain_tools) == 1
    assert isinstance(langchain_tools[0], BaseTool)
    assert langchain_tools[0].name == "tool1"

    # Trigger cleanup
    cleanup_event.set()
    await task
"""


@pytest.mark.asyncio
async def test_convert_mcp_to_langchain_tools_multiple_servers(
    mock_stdio_client,
    mock_client_session
):
    server_configs = {
        "server1": {"command": "cmd1", "args": []},
        "server2": {"command": "cmd2", "args": []}
    }

    tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)

    # Verify the correct number of tools was created
    assert len(tools) == 2  # One tool per server
    assert all(isinstance(tool, BaseTool) for tool in tools)

    # Test cleanup
    await cleanup()


"""
@pytest.mark.asyncio
async def test_tool_execution(mock_stdio_client, mock_client_session):
    server_configs = {
        "test_server": {"command": "test", "args": []}
    }

    # Mock the tool execution response
    session = mock_client_session.return_value.__aenter__.return_value
    session.call_tool.return_value = MagicMock(
        isError=False,
        content={"result": "success"}
    )

    tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)

    # Test tool execution
    result = await tools[0]._arun(test_param="value")
    assert result == {"result": "success"}

    # Verify the tool was called with the correct parameters
    session.call_tool.assert_called_once_with("tool1", {"test_param": "value"})

    await cleanup()
"""


@pytest.mark.asyncio
async def test_tool_execution_error(mock_stdio_client, mock_client_session):
    server_configs = {
        "test_server": {"command": "test", "args": []}
    }

    # Mock error response
    session = mock_client_session.return_value.__aenter__.return_value
    session.call_tool.return_value = MagicMock(
        isError=True,
        content="Error message"
    )

    tools, cleanup = await convert_mcp_to_langchain_tools(server_configs)

    # Test tool execution error
    with pytest.raises(Exception):
        await tools[0]._arun(test_param="value")

    await cleanup()

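With the `dev` dependency group from `pyproject.toml` installed, this suite can be run with `pytest`; a programmatic equivalent (paths assumed relative to the project root) is:

```python
import sys

import pytest

# Equivalent to running `pytest -v tests` from the project root;
# pytest-asyncio (declared in the dev dependency group) collects the
# @pytest.mark.asyncio tests.
sys.exit(pytest.main(['-v', 'tests']))
```
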