mcp-chat 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mcp_chat/__init__.py ADDED
@@ -0,0 +1,3 @@
1
"""MCP Client Using LangChain / Python."""

# NOTE(review): wheel metadata declares package version 0.0.1 while this
# module reports 0.2.7 — confirm which is authoritative and sync them.
__version__ = "0.2.7"
mcp_chat/cli_chat.py ADDED
@@ -0,0 +1,270 @@
1
+ # Standard library imports
2
+ import argparse
3
+ import asyncio
4
+ from enum import Enum
5
+ import json
6
+ import logging
7
+ import sys
8
+ from pathlib import Path
9
+ from typing import (
10
+ Any,
11
+ cast,
12
+ )
13
+
14
+ # Third-party imports
15
+ try:
16
+ from dotenv import load_dotenv
17
+ from langchain.chat_models import init_chat_model
18
+ from langchain.schema import (
19
+ AIMessage,
20
+ BaseMessage,
21
+ HumanMessage,
22
+ SystemMessage,
23
+ )
24
+ from langchain_core.runnables.base import Runnable
25
+ from langchain_core.messages.tool import ToolMessage
26
+ from langgraph.prebuilt import create_react_agent
27
+ from langchain_mcp_tools import (
28
+ convert_mcp_to_langchain_tools,
29
+ McpServerCleanupFn,
30
+ )
31
+ except ImportError as e:
32
+ print(f"\nError: Required package not found: {e}")
33
+ print("Please ensure all required packages are installed\n")
34
+ sys.exit(1)
35
+
36
+ # Local application imports
37
+ from config_loader import load_config
38
+
39
+ # Type definitions
40
+ ConfigType = dict[str, Any]
41
+
42
+
43
# ANSI color escape codes
class Colors(str, Enum):
    """ANSI escape sequences used to color terminal output."""

    YELLOW = "\033[33m"  # switch text color to yellow
    CYAN = "\033[36m"    # switch text color to cyan
    RESET = "\033[0m"    # restore the terminal's default color

    def __str__(self) -> str:
        # Lets members be embedded directly in f-strings and print()
        # without reaching for .value.
        return self.value
51
+
52
+
53
+ def parse_arguments() -> argparse.Namespace:
54
+ """Parse and return command line args for config path and verbosity."""
55
+ parser = argparse.ArgumentParser(
56
+ description="CLI Chat Application",
57
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
58
+ )
59
+ parser.add_argument(
60
+ "-c", "--config",
61
+ default="llm_mcp_config.json5",
62
+ help="path to config file",
63
+ type=Path,
64
+ metavar="PATH"
65
+ )
66
+ parser.add_argument(
67
+ "-v", "--verbose",
68
+ action="store_true",
69
+ help="run with verbose logging"
70
+ )
71
+ return parser.parse_args()
72
+
73
+
74
def init_logger(verbose: bool) -> logging.Logger:
    """Configure root logging and return the root logger.

    Uses DEBUG level when *verbose* is set, INFO otherwise.
    """
    log_level = logging.DEBUG if verbose else logging.INFO
    # Grey "[LEVEL]" prefix ahead of every message.
    logging.basicConfig(
        format="\x1b[90m[%(levelname)s]\x1b[0m %(message)s",
        level=log_level,
    )
    return logging.getLogger()
81
+
82
+
83
def print_colored(text: str, color: Colors, end: str = "\n") -> None:
    """Emit *text* in the given color, restoring the default afterwards."""
    colored = f"{color}{text}{Colors.RESET}"
    print(colored, end=end)
86
+
87
+
88
+ def set_color(color: Colors) -> None:
89
+ """Set terminal color."""
90
+ print(color, end="")
91
+
92
+
93
def clear_line() -> None:
    """Erase the previous terminal line.

    ``\\x1b[1A`` moves the cursor up one row; ``\\x1b[2K`` clears that row.
    """
    print("\x1b[1A\x1b[2K", end="")
96
+
97
+
98
async def get_user_query(remaining_queries: list[str]) -> str | None:
    """Get user input or the next example query.

    An empty input pops the next example from *remaining_queries* (mutated
    in place); when no examples remain, the user is re-prompted.  The
    re-prompt is a loop rather than recursion so that a user repeatedly
    pressing Enter cannot exhaust the interpreter's recursion limit.

    Args:
        remaining_queries: Example queries consumed one per empty input.

    Returns:
        The query string, or ``None`` when the user typed "quit" or "q".
    """
    while True:
        set_color(Colors.YELLOW)
        query = input("Query: ").strip()

        if len(query) == 0:
            if len(remaining_queries) > 0:
                query = remaining_queries.pop(0)
                # Overwrite the blank prompt line with the example query.
                clear_line()
                print_colored(f"Example Query: {query}", Colors.YELLOW)
            else:
                set_color(Colors.RESET)
                print("\nPlease type a query, or 'quit' or 'q' to exit\n")
                continue  # re-prompt

        print(Colors.RESET)  # Reset after input

        if query.lower() in ["quit", "q"]:
            print_colored("Goodbye!\n", Colors.CYAN)
            return None

        return query
121
+
122
+
123
async def handle_conversation(
    agent: Runnable,
    messages: list[BaseMessage],
    example_queries: list[str],
    verbose: bool
) -> None:
    """Manage an interactive conversation loop between the user and AI agent.

    Args:
        agent (Runnable): The initialized ReAct agent that processes queries
        messages (list[BaseMessage]): List to maintain conversation history
        example_queries (list[str]): list of example queries that can be used
            when user presses Enter
        verbose (bool): Flag to control detailed output of tool responses

    Exception handling:
        - TypeError: Ensures response is in correct string format
        - General exceptions: Allows conversation to continue after errors

    The conversation continues until user types "quit" or "q".
    """
    print("\nConversation started. "
          "Type 'quit' or 'q' to end the conversation.\n")
    if len(example_queries) > 0:
        print("Example Queries (just type Enter to supply them one by one):")
        for ex_q in example_queries:
            print(f"- {ex_q}")
        print()

    while True:
        try:
            query = await get_user_query(example_queries)
            # None means the user asked to quit.
            if not query:
                break

            messages.append(HumanMessage(content=query))

            # The agent receives the full history each turn so it keeps
            # conversational context.
            result = await agent.ainvoke({
                "messages": messages
            })

            result_messages = cast(list[BaseMessage], result["messages"])
            # the last message should be an AIMessage
            response = result_messages[-1].content
            if not isinstance(response, str):
                raise TypeError(
                    f"Expected string response, got {type(response)}"
                )

            # check if msg one before is a ToolMessage
            message_one_before = result_messages[-2]
            if isinstance(message_one_before, ToolMessage):
                if verbose:
                    # show tools call response
                    print(message_one_before.content)
                # new line after tool call output
                print()
            print_colored(f"{response}\n", Colors.CYAN)
            messages.append(AIMessage(content=response))

        except Exception as e:
            # Keep the REPL alive after any failure (network, tool, parse).
            print(f"Error getting response: {str(e)}")
            print("You can continue chatting or type 'quit' to exit.")
186
+
187
+
188
async def init_react_agent(
    config: ConfigType,
    logger: logging.Logger
) -> tuple[Runnable, list[BaseMessage], McpServerCleanupFn]:
    """Initialize and configure a ReAct agent for conversation handling.

    Args:
        config (ConfigType): Configuration dictionary containing LLM and
            MCP server settings
        logger (logging.Logger): Logger instance for initialization
            status updates

    Returns:
        tuple[Runnable, list[BaseMessage], McpServerCleanupFn]:
            Returns a tuple containing:
            - Configured ReAct agent ready for conversation
            - Initial message list (empty or with system prompt)
            - Cleanup function for MCP server connections
    """
    llm_config = config["llm"]
    logger.info(f"Initializing model... {json.dumps(llm_config, indent=2)}\n")

    # "system_prompt" is consumed locally below; init_chat_model must not
    # see it.
    model_kwargs = {
        key: value
        for key, value in llm_config.items()
        if key not in ["system_prompt"]
    }
    llm = init_chat_model(**model_kwargs)

    mcp_servers = config["mcp_servers"]
    logger.info(f"Initializing {len(mcp_servers)} MCP server(s)...\n")

    tools, mcp_cleanup = await convert_mcp_to_langchain_tools(
        mcp_servers,
        logger
    )

    agent = create_react_agent(llm, tools)

    # Seed the history with the system prompt when one is configured.
    messages: list[BaseMessage] = []
    system_prompt = llm_config.get("system_prompt")
    if isinstance(system_prompt, str) and system_prompt:
        messages.append(SystemMessage(content=system_prompt))

    return agent, messages, mcp_cleanup
234
+
235
+
236
async def run() -> None:
    """Main async function to set up and run the simple chat app.

    Loads environment and configuration, builds the ReAct agent with its
    MCP tools, and drives the interactive conversation loop.  MCP server
    connections are always torn down in the ``finally`` block, even when
    setup or the conversation raises.
    """
    mcp_cleanup: McpServerCleanupFn | None = None
    try:
        load_dotenv()
        args = parse_arguments()
        logger = init_logger(args.verbose)
        config = load_config(args.config)
        # Single lookup; copy so the conversation loop can consume entries
        # without mutating the loaded config (missing/None key -> no examples).
        example_queries = list(config.get("example_queries") or [])

        agent, messages, mcp_cleanup = await init_react_agent(config, logger)

        await handle_conversation(
            agent,
            messages,
            example_queries,
            args.verbose
        )

    finally:
        # mcp_cleanup stays None if initialization failed before the agent
        # was created.
        if mcp_cleanup is not None:
            await mcp_cleanup()
262
+
263
+
264
def main() -> None:
    """Console-script entry point: run the async app to completion."""
    asyncio.run(run())
267
+
268
+
269
# Allow running as a plain script as well as via the console-script entry
# point declared in the package metadata.
if __name__ == "__main__":
    main()
@@ -0,0 +1,77 @@
1
+ import os
2
+ import re
3
+ from pathlib import Path
4
+ from typing import TypedDict, Any
5
+ import pyjson5 as json5
6
+
7
+
8
+ class LLMConfig(TypedDict):
9
+ """Type definition for LLM configuration."""
10
+ model_provider: str
11
+ model: str | None
12
+ temperature: float | None
13
+ system_prompt: str | None
14
+
15
+
16
class ConfigError(Exception):
    """Root of the configuration error hierarchy."""
19
+
20
+
21
class ConfigFileNotFoundError(ConfigError):
    """Signals that the requested configuration file does not exist."""
24
+
25
+
26
class ConfigValidationError(ConfigError):
    """Signals that the configuration content failed validation."""
29
+
30
+
31
+ def load_config(config_path: str):
32
+ """Load and validate configuration from JSON5 file with environment
33
+ variable substitution.
34
+ """
35
+ config_file = Path(config_path)
36
+ if not config_file.exists():
37
+ raise ConfigFileNotFoundError(f"Config file {config_path} not found")
38
+
39
+ with open(config_file, "r", encoding="utf-8") as f:
40
+ content = f.read()
41
+
42
+ # Replace ${VAR_NAME} with environment variable values, but skip comments
43
+ def replace_env_var(match):
44
+ var_name = match.group(1)
45
+ env_value = os.getenv(var_name)
46
+ if env_value is None:
47
+ raise ConfigValidationError(
48
+ f'Environment variable "{var_name}" not found '
49
+ f'in "{config_file}"'
50
+ )
51
+ return env_value
52
+
53
+ # Process line by line to skip comments
54
+ lines = content.split("\n")
55
+ processed_lines = []
56
+
57
+ for line in lines:
58
+ # Split line at first occurrence of "//"
59
+ if "//" in line:
60
+ code_part, comment_part = line.split("//", 1)
61
+ # Apply substitution only to the code part
62
+ processed_code = re.sub(r"\$\{([^}]+)\}", replace_env_var,
63
+ code_part)
64
+ # Reconstruct line with original comment
65
+ processed_line = processed_code + "//" + comment_part
66
+ else:
67
+ # No comment in line, apply substitution to entire line
68
+ processed_line = re.sub(r"\$\{([^}]+)\}", replace_env_var, line)
69
+
70
+ processed_lines.append(processed_line)
71
+
72
+ content = "\n".join(processed_lines)
73
+
74
+ # Parse the substituted content
75
+ config: dict[str, Any] = json5.loads(content)
76
+
77
+ return config
@@ -0,0 +1,151 @@
1
+ Metadata-Version: 2.4
2
+ Name: mcp-chat
3
+ Version: 0.0.1
4
+ Summary: Simple CLI MCP Client to quickly test and explore MCP servers from the command line
5
+ Project-URL: Bug Tracker, https://github.com/hideya/mcp-client-langchain-py/issues
6
+ Project-URL: Source Code, https://github.com/hideya/mcp-client-langchain-py
7
+ License-File: LICENSE
8
+ Keywords: cli,client,explore,langchain,mcp,model-context-protocol,python,quick,simple,test,tools,try
9
+ Requires-Python: >=3.11
10
+ Requires-Dist: langchain-anthropic>=0.3.1
11
+ Requires-Dist: langchain-google-genai>=2.1.5
12
+ Requires-Dist: langchain-groq>=0.2.3
13
+ Requires-Dist: langchain-mcp-tools>=0.2.7
14
+ Requires-Dist: langchain-openai>=0.3.0
15
+ Requires-Dist: langchain>=0.3.26
16
+ Requires-Dist: langgraph>=0.5.0
17
+ Requires-Dist: pyjson5>=1.6.8
18
+ Requires-Dist: python-dotenv>=1.0.1
19
+ Requires-Dist: websockets>=15.0.1
20
+ Description-Content-Type: text/markdown
21
+
22
+ # Simple CLI MCP Client Using LangChain / Python [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/hideya/langchain-mcp-tools-py/blob/main/LICENSE)
23
+
24
+ This is a simple [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) client
25
+ that is intended for trying out MCP servers via a command-line interface.
26
+
27
+ When testing LLM and MCP servers, their settings can be conveniently configured via a configuration file, such as the following:
28
+
29
+ ```json5
30
+ {
31
+ "llm": {
32
+ "model_provider": "openai",
33
+ "model": "gpt-4o-mini",
34
+ // "model_provider": "anthropic",
35
+ // "model": "claude-3-5-haiku-latest",
36
+ // "model_provider": "google_genai",
37
+ // "model": "gemini-2.0-flash",
38
+ },
39
+
40
+ "mcp_servers": {
41
+ "fetch": {
42
+ "command": "uvx",
43
+ "args": [ "mcp-server-fetch" ]
44
+ },
45
+
46
+ "weather": {
47
+ "command": "npx",
48
+ "args": [ "-y", "@h1deya/mcp-server-weather" ]
49
+ },
50
+
51
+ // Auto-detection: tries Streamable HTTP first, falls back to SSE
52
+ "remote-mcp-server": {
53
+ "url": "https://${SERVER_HOST}:${SERVER_PORT}/..."
54
+ },
55
+
56
+ // Example of authentication via Authorization header
57
+ "github": {
58
+ "type": "http", // recommended to specify the protocol explicitly when authentication is used
59
+ "url": "https://api.githubcopilot.com/mcp/",
60
+ "headers": {
61
+ "Authorization": "Bearer ${GITHUB_PERSONAL_ACCESS_TOKEN}"
62
+ }
63
+ },
64
+ }
65
+ }
66
+ ```
67
+
68
+ It leverages [LangChain ReAct Agent](https://langchain-ai.github.io/langgraph/reference/agents/) and
69
+ a utility function `convert_mcp_to_langchain_tools()` from
70
+ [`langchain_mcp_tools`](https://pypi.org/project/langchain-mcp-tools/).
71
+ This function handles parallel initialization of specified multiple MCP servers
72
+ and converts their available tools into a list of LangChain-compatible tools
73
+ ([list[BaseTool]](https://python.langchain.com/api_reference/core/tools/langchain_core.tools.base.BaseTool.html#langchain_core.tools.base.BaseTool)).
74
+
75
+ This client supports both local (stdio) MCP servers as well as
76
+ remote (Streamable HTTP / SSE / WebSocket) MCP servers
77
+ which are accessible via a simple URL and optional headers for authentication and other purposes.
78
+
79
+ This client only supports text results of MCP tool calls and disregards other result types.
80
+
81
+ For the convenience of debugging MCP servers, this client prints local (stdio) MCP server logs to the console.
82
+
83
+ LLMs from Anthropic, OpenAI and Google (GenAI) are currently supported.
84
+
85
+ A TypeScript version of this MCP client is available
86
+ [here](https://github.com/hideya/mcp-client-langchain-ts).
87
+
88
+ ## Prerequisites
89
+
90
+ - Python 3.11+
91
+ - [optional] [`uv` (`uvx`)](https://docs.astral.sh/uv/getting-started/installation/)
92
+ installed to run Python package-based MCP servers
93
+ - [optional] [npm 7+ (`npx`)](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
94
+ to run Node.js package-based MCP servers
95
+ - API keys from [Anthropic](https://console.anthropic.com/settings/keys),
+ [OpenAI](https://platform.openai.com/api-keys),
+ [Google AI Studio](https://aistudio.google.com/apikey), and/or
+ [Groq](https://console.groq.com/keys)
98
+ as needed
99
+
100
+ ## Setup
101
+
102
+ 1. Install dependencies:
103
+ ```bash
104
+ make install
105
+ ```
106
+
107
+ 2. Setup API keys:
108
+ ```bash
109
+ cp .env.template .env
110
+ ```
111
+ - Update `.env` as needed.
112
+ - `.gitignore` is configured to ignore `.env`
113
+ to prevent accidental commits of the credentials.
114
+
115
+ 3. Configure LLM and MCP Servers settings `llm_mcp_config.json5` as needed.
116
+
117
+ - [The configuration file format](https://github.com/hideya/mcp-client-langchain-ts/blob/main/llm_mcp_config.json5)
118
+ for MCP servers follows the same structure as
119
+ [Claude for Desktop](https://modelcontextprotocol.io/quickstart/user),
120
+ with one difference: the key name `mcpServers` has been changed
121
+ to `mcp_servers` to follow the snake_case convention
122
+ commonly used in JSON configuration files.
123
+ - The file format is [JSON5](https://json5.org/),
124
+ where comments and trailing commas are allowed.
125
+ - The format is further extended to replace `${...}` notations
126
+ with the values of corresponding environment variables.
127
+ - Keep all the credentials and private info in the `.env` file
128
+ and refer to them with `${...}` notation as needed.
129
+
130
+
131
+ ## Usage
132
+
133
+ Run the app:
134
+ ```bash
135
+ make start
136
+ ```
137
+ It takes a while on the first run.
138
+
139
+ Run in verbose mode:
140
+ ```bash
141
+ make start-v
142
+ ```
143
+
144
+ See commandline options:
145
+ ```bash
146
+ make start-h
147
+ ```
148
+
149
+ At the prompt, you can simply press Enter to use example queries that perform MCP server tool invocations.
150
+
151
+ Example queries can be configured in `llm_mcp_config.json5`
@@ -0,0 +1,8 @@
1
+ mcp_chat/__init__.py,sha256=cxW2vB9rLAKUrNgEJ9hxa3sAK95x9-3_7RZaTxCVTTo,66
2
+ mcp_chat/cli_chat.py,sha256=raNzfFxm6B0RfMZ2HjCYLbQnzr_SHgvoOpDeqIehly0,8080
3
+ mcp_chat/config_loader.py,sha256=SXQYTf_i5ZG7F8_EUC_8_SC0W4R97Juqx0jCllYS0Wk,2325
4
+ mcp_chat-0.0.1.dist-info/METADATA,sha256=PncSynhjA5xiNVVpkHHZXmNq6teReYTxVJUv4XyC_Ro,5564
5
+ mcp_chat-0.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
6
+ mcp_chat-0.0.1.dist-info/entry_points.txt,sha256=nVuS7z3aG9zJ6VcZ4OK9j9lKea-fnNmMluMikwG3z-o,52
7
+ mcp_chat-0.0.1.dist-info/licenses/LICENSE,sha256=CRC91e8v116gCpnp7h49oIa6_zjhxqnHFTREeoZFJwA,1072
8
+ mcp_chat-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ mcp-chat = mcp_chat.cli_chat:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 hideya kawahara
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.