mseep-mcp-simple-chatbot 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. mseep_mcp_simple_chatbot-0.1.1/.env.example +9 -0
  2. mseep_mcp_simple_chatbot-0.1.1/.gitai/config.yaml +35 -0
  3. mseep_mcp_simple_chatbot-0.1.1/.gitai/prompts/commit_prompt.txt +51 -0
  4. mseep_mcp_simple_chatbot-0.1.1/.gitai/prompts/pr_prompt.txt +56 -0
  5. mseep_mcp_simple_chatbot-0.1.1/.gitignore +5 -0
  6. mseep_mcp_simple_chatbot-0.1.1/.python-version +1 -0
  7. mseep_mcp_simple_chatbot-0.1.1/LICENSE +21 -0
  8. mseep_mcp_simple_chatbot-0.1.1/PKG-INFO +23 -0
  9. mseep_mcp_simple_chatbot-0.1.1/README.md +268 -0
  10. mseep_mcp_simple_chatbot-0.1.1/README_ZH.md +279 -0
  11. mseep_mcp_simple_chatbot-0.1.1/assets/chatbot_streamlit_demo.png +0 -0
  12. mseep_mcp_simple_chatbot-0.1.1/assets/chatbot_streamlit_demo_light.png +0 -0
  13. mseep_mcp_simple_chatbot-0.1.1/assets/mcp_chatbot_logo.png +0 -0
  14. mseep_mcp_simple_chatbot-0.1.1/assets/mcp_chatbot_streamlit_demo_low.gif +0 -0
  15. mseep_mcp_simple_chatbot-0.1.1/assets/single_prompt_demo.png +0 -0
  16. mseep_mcp_simple_chatbot-0.1.1/data-example/mcp-python-sdk-readme.md +622 -0
  17. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_streamlit/README.md +78 -0
  18. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_streamlit/README_ZH.md +75 -0
  19. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_streamlit/app.py +894 -0
  20. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_terminal/README.md +144 -0
  21. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_terminal/README_ZH.md +141 -0
  22. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_terminal/chatbot_terminal.py +96 -0
  23. mseep_mcp_simple_chatbot-0.1.1/example/chatbot_terminal/chatbot_terminal_stream.py +139 -0
  24. mseep_mcp_simple_chatbot-0.1.1/example/single_prompt/README.md +83 -0
  25. mseep_mcp_simple_chatbot-0.1.1/example/single_prompt/README_ZH.md +80 -0
  26. mseep_mcp_simple_chatbot-0.1.1/example/single_prompt/single_prompt.py +64 -0
  27. mseep_mcp_simple_chatbot-0.1.1/example/single_prompt/single_prompt_stream.py +100 -0
  28. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/__init__.py +7 -0
  29. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/chat/__init__.py +3 -0
  30. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/chat/session.py +611 -0
  31. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/config/__init__.py +0 -0
  32. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/config/configuration.py +100 -0
  33. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/llm/__init__.py +37 -0
  34. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/llm/oai.py +70 -0
  35. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/llm/ollama.py +93 -0
  36. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/mcp/__init__.py +4 -0
  37. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/mcp/client.py +141 -0
  38. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/mcp/mcp_tool.py +35 -0
  39. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/utils/__init__.py +4 -0
  40. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/utils/stream_printer.py +107 -0
  41. mseep_mcp_simple_chatbot-0.1.1/mcp_chatbot/utils/workflow.py +219 -0
  42. mseep_mcp_simple_chatbot-0.1.1/mcp_servers/markdown_processor.py +82 -0
  43. mseep_mcp_simple_chatbot-0.1.1/mcp_servers/servers_config.json +13 -0
  44. mseep_mcp_simple_chatbot-0.1.1/pyproject.toml +76 -0
  45. mseep_mcp_simple_chatbot-0.1.1/requirements.txt +6 -0
  46. mseep_mcp_simple_chatbot-0.1.1/scripts/check.sh +42 -0
  47. mseep_mcp_simple_chatbot-0.1.1/scripts/unittest.sh +12 -0
  48. mseep_mcp_simple_chatbot-0.1.1/test/chat/test_session.py +178 -0
  49. mseep_mcp_simple_chatbot-0.1.1/uv.lock +1387 -0
@@ -0,0 +1,9 @@
1
+ LLM_API_KEY="your_llm_api_key_here"
2
+ LLM_BASE_URL="your_llm_base_url_here"
3
+ LLM_MODEL_NAME="your_llm_model_name_here"
4
+
5
+ OLLAMA_MODEL_NAME="your_ollama_model_name_here"
6
+ OLLAMA_BASE_URL="your_ollama_base_url_here"
7
+
8
+ MARKDOWN_FOLDER_PATH="/path/to/your/markdown/folder"
9
+ RESULT_FOLDER_PATH="/path/to/your/result/folder"
@@ -0,0 +1,35 @@
1
+ llm:
2
+ default:
3
+ provider: openai
4
+ model: gpt-4o-mini
5
+ # apiKey: 'sk-...' # Highly recommended to set via environment variable, not hard-coded in the config file
6
+ apiKeyEnvVar: 'OPENAI_API_KEY' # Set it in your environment variables or .env file
7
+ # baseUrl: 'your-custom-base-url'
8
+ baseUrlEnvVar: 'OPENAI_BASE_URL' # Set it in your environment variables or .env file
9
+ ollamaBaseUrl: 'http://localhost:11434' # Only needed when provider is ollama
10
+ temperature: 0.7
11
+
12
+ commands: # Can override the default settings for each command
13
+ commit:
14
+ provider: openai
15
+ model: gpt-4o-mini
16
+ apiKeyEnvVar: 'OPENAI_API_KEY'
17
+ baseUrlEnvVar: 'OPENAI_BASE_URL'
18
+ pr:
19
+ provider: deepseek # Can be changed to 'openai' or 'ollama' ...
20
+ model: deepseek-chat
21
+ apiKeyEnvVar: 'DEEPSEEK_API_KEY'
22
+ baseUrlEnvVar: 'DEEPSEEK_BASE_URL'
23
+ temperature: 0.4
24
+
25
+ commit:
26
+ suggestions: 3
27
+ prompt_template: './prompts/commit_prompt.txt'
28
+
29
+ pr:
30
+ base_branch: main # When user doesn't pass --target
31
+ include_file_tree: true # Whether to include the file tree generated by ls-files
32
+ include_unstaged: false # Corresponds to --unstaged
33
+ max_lines_per_file: 300 # Diff truncation threshold
34
+ warn_on_conflict: true # Whether to interrupt when merge conflicts are detected
35
+ prompt_template: ./prompts/pr_prompt.txt
@@ -0,0 +1,51 @@
1
+ You are an AI assistant specialized in crafting Git commit messages according to the Conventional Commits specification (v1.0.0). Your task is to generate {{ suggestions }} distinct commit messages based on the provided file diff.
2
+
3
+ **Conventional Commits Structure:**
4
+ <type>[optional scope]: <description>
5
+
6
+ [optional body]
7
+
8
+ [optional footer(s)]
9
+
10
+ **Key Types:**
11
+ - `fix`: A bug fix (correlates with PATCH in SemVer).
12
+ - `feat`: A new feature (correlates with MINOR in SemVer).
13
+ - Other allowed types (non-exhaustive): `build`, `chore`, `ci`, `docs`, `style`, `refactor`, `perf`, `test`.
14
+
15
+ **Breaking Changes:**
16
+ - Indicated by appending `!` to the type/scope (e.g., `feat!:` or `refactor(api)!:`).
17
+ - OR by including a `BREAKING CHANGE:` footer.
18
+ - If a breaking change is present, ensure the `!` marker or the footer is included.
19
+
20
+ **Scope:**
21
+ - Optional, enclosed in parentheses, providing context (e.g., `feat(parser): ...`).
22
+ - Infer a relevant scope if possible from the file paths or changes. If no specific scope is clear, omit it.
23
+
24
+ **Description:**
25
+ - Concise summary of the change, in the imperative mood (e.g., "add feature" not "added feature").
26
+ - Keep the description short, ideally under 50 characters.
27
+
28
+ **Body & Footer (Optional for this task):**
29
+ - For this task, focus on generating the header line (<type>[scope]: <description>). Do NOT include a body or footers unless it's a `BREAKING CHANGE:` footer, which should be part of the description if you choose that method for indicating a breaking change and it's brief. Otherwise, prefer the `!` marker.
30
+
31
+ **IMPORTANT - Output Format:**
32
+ - Your ENTIRE response must be ONLY a JSON array of objects, with NO wrapping object.
33
+ - DO NOT wrap the array in a JSON object with a "suggestions" key.
34
+ - Each object in the array MUST have a "message" property containing the commit message.
35
+ - The array must contain exactly {{ suggestions }} objects.
36
+ - DO NOT include any explanations, markdown, or additional text before or after the JSON array.
37
+ - DO NOT use triple backticks to format the JSON.
38
+
39
+ **CORRECT format:**
40
+ [
41
+ { "message": "feat(api): add user profile endpoint" },
42
+ { "message": "docs: update API documentation for user profile" }
43
+ ]
44
+
45
+ **INCORRECT format (DO NOT USE):**
46
+ {
47
+ "suggestions": [
48
+ { "message": "feat(api): add user profile endpoint" },
49
+ { "message": "docs: update API documentation for user profile" }
50
+ ]
51
+ }
@@ -0,0 +1,56 @@
1
+ You are an expert developer writing a Pull-Request description for code reviewers.
2
+
3
+ ##############################
4
+ ## INPUT DATA ##
5
+ ##############################
6
+
7
+ <<<DIFF>>>
8
+ {{ diff }}
9
+ <<<END DIFF>>>
10
+
11
+ {{#if tree}}
12
+ <<<FILE_TREE>>>
13
+ {{ tree }}
14
+ <<<END FILE_TREE>>>
15
+ {{/if}}
16
+
17
+ Branch : {{ branch }}
18
+ Target : {{ target }}
19
+
20
+ {{#if commits}}
21
+ <<<COMMITS>>>
22
+ {{ commits }}
23
+ <<<END COMMITS>>>
24
+ {{/if}}
25
+
26
+ ##############################
27
+ ## TASK ##
28
+ ##############################
29
+
30
+ Generate **one** PR draft that:
31
+
32
+ 1. Starts with a concise **title** following
33
+ `<type>(optional-scope): <description>`
34
+ where **type** ∈
35
+ `feat | fix | docs | style | refactor | perf | test | build | ci | chore`.
36
+
37
+ 2. Provides a **body** in Markdown with these sections *(omit a section if empty)*
38
+ - **Overview** – why the change is needed (≤ 2 sentences).
39
+ - **Key Changes** – bullet list of the most important code areas / commits.
40
+ - **Implementation Notes** – notable design decisions.
41
+ - **Migration / Checklist** – steps for users or follow-ups (optional).
42
+
43
+ Constraints
44
+ - Keep the title ≤ 60 characters, imperative mood (“add X”, not “added X”).
45
+ - Summarise; do **not** dump the entire diff again.
46
+
47
+ ##############################
48
+ ## OUTPUT ONLY ##
49
+ ##############################
50
+
51
+ Return **exactly** one JSON object **without** markdown fences or extra text:
52
+
53
+ {
54
+ "title": "<generated-title>",
55
+ "body": "<markdown-body>"
56
+ }
@@ -0,0 +1,5 @@
1
+ # Local environment variables
2
+ .env
3
+ __pycache__
4
+ .venv
5
+ .gitai/pr_docs
@@ -0,0 +1 @@
1
+ 3.10
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 mcp_chatbot
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,23 @@
1
+ Metadata-Version: 2.4
2
+ Name: mseep-mcp-simple-chatbot
3
+ Version: 0.1.1
4
+ Summary: A simple CLI chatbot using the Model Context Protocol (MCP)
5
+ Author-email: mseep <support@skydeck.ai>
6
+ License: MIT
7
+ License-File: LICENSE
8
+ Keywords: chatbot,cli,llm,mcp
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Requires-Python: >=3.10
15
+ Requires-Dist: mcp[cli]>=1.0.0
16
+ Requires-Dist: openai>=1.66.3
17
+ Requires-Dist: python-dotenv>=1.0.0
18
+ Requires-Dist: requests>=2.31.0
19
+ Requires-Dist: streamlit>=1.43.2
20
+ Requires-Dist: uvicorn>=0.32.1
21
+ Description-Content-Type: text/plain
22
+
23
+ Package managed by MseeP.ai
@@ -0,0 +1,268 @@
1
+ # MCPChatbot Example
2
+
3
+ ![MCP Chatbot](assets/mcp_chatbot_logo.png)
4
+
5
+ This project demonstrates how to integrate the Model Context Protocol (MCP) with customized LLM (e.g. Qwen), creating a powerful chatbot that can interact with various tools through MCP servers. The implementation showcases the flexibility of MCP by enabling LLMs to use external tools seamlessly.
6
+
7
+ > [!TIP]
8
+ > For Chinese version, please refer to [README_ZH.md](README_ZH.md).
9
+
10
+ ## Overview
11
+
12
+ **Chatbot Streamlit Example**
13
+
14
+ <img src="assets/mcp_chatbot_streamlit_demo_low.gif" width="800">
15
+
16
+ **Workflow Tracer Example**
17
+
18
+ <img src="assets/single_prompt_demo.png" width="800">
19
+
20
+ - 🚩 Update (2025-04-11):
21
+ - Added chatbot streamlit example.
22
+ - 🚩 Update (2025-04-10):
23
+ - More complex LLM response parsing, supporting multiple MCP tool calls and multiple chat iterations.
24
+ - Added single prompt examples with both regular and streaming modes.
25
+ - Added interactive terminal chatbot examples.
26
+
27
+ This project includes:
28
+
29
+ - Simple/Complex CLI chatbot interface
30
+ - Integration with some builtin MCP Server like (Markdown processing tools)
31
+ - Support for customized LLM (e.g. Qwen) and Ollama
32
+ - Example scripts for single prompt processing in both regular and streaming modes
33
+ - Interactive terminal chatbot with regular and streaming response modes
34
+
35
+ ## Requirements
36
+
37
+ - Python 3.10+
38
+ - Dependencies (automatically installed via requirements):
39
+ - python-dotenv
40
+ - mcp[cli]
41
+ - openai
42
+ - colorama
43
+
44
+ ## Installation
45
+
46
+ 1. **Clone the repository:**
47
+
48
+ ```bash
49
+ git clone git@github.com:keli-wen/mcp_chatbot.git
50
+ cd mcp_chatbot
51
+ ```
52
+
53
+ 2. **Set up a virtual environment (recommended):**
54
+
55
+ ```bash
56
+ cd mcp_chatbot  # skip this if you are already inside the cloned repository
57
+
58
+ # Install uv if you don't have it already
59
+ pip install uv
60
+
61
+ # Create a virtual environment and install dependencies
62
+ uv venv .venv --python=3.10
63
+
64
+ # Activate the virtual environment
65
+ # For macOS/Linux
66
+ source .venv/bin/activate
67
+ # For Windows
68
+ .venv\Scripts\activate
69
+
70
+ # Deactivate the virtual environment
71
+ deactivate
72
+ ```
73
+
74
+ 3. **Install dependencies:**
75
+
76
+ ```bash
77
+ pip install -r requirements.txt
78
+ # or use uv for faster installation
79
+ uv pip install -r requirements.txt
80
+ ```
81
+
82
+ 4. **Configure your environment:**
83
+ - Copy the `.env.example` file to `.env`:
84
+
85
+ ```bash
86
+ cp .env.example .env
87
+ ```
88
+
89
+ - Edit the `.env` file to add your LLM API key (Qwen is used here only as a demo — any OpenAI-compatible LLM works; be sure to set both `LLM_BASE_URL` and `LLM_API_KEY`) and set the folder paths:
90
+
91
+ ```
92
+ LLM_MODEL_NAME=your_llm_model_name_here
93
+ LLM_BASE_URL=your_llm_base_url_here
94
+ LLM_API_KEY=your_llm_api_key_here
95
+ OLLAMA_MODEL_NAME=your_ollama_model_name_here
96
+ OLLAMA_BASE_URL=your_ollama_base_url_here
97
+ MARKDOWN_FOLDER_PATH=/path/to/your/markdown/folder
98
+ RESULT_FOLDER_PATH=/path/to/your/result/folder
99
+ ```
100
+
101
+ ## Important Configuration Notes ⚠️
102
+
103
+ Before running the application, you need to modify the following:
104
+
105
+ 1. **MCP Server Configuration**:
106
+ Edit `mcp_servers/servers_config.json` to match your local setup:
107
+
108
+ ```json
109
+ {
110
+ "mcpServers": {
111
+ "markdown_processor": {
112
+ "command": "/path/to/your/uv",
113
+ "args": [
114
+ "--directory",
115
+ "/path/to/your/project/mcp_servers",
116
+ "run",
117
+ "markdown_processor.py"
118
+ ]
119
+ }
120
+ }
121
+ }
122
+ ```
123
+
124
+ Replace `/path/to/your/uv` with the actual path to your uv executable. **You can use `which uv` to get the path**.
125
+ Replace `/path/to/your/project/mcp_servers` with the absolute path to the mcp_servers directory in your project. (For **Windows** users, you can take a look at the example in the [Troubleshooting](#troubleshooting) section)
126
+
127
+ 2. **Environment Variables**:
128
+ Make sure to set proper paths in your `.env` file:
129
+
130
+ ```
131
+ MARKDOWN_FOLDER_PATH="/path/to/your/markdown/folder"
132
+ RESULT_FOLDER_PATH="/path/to/your/result/folder"
133
+ ```
134
+
135
+ The application will validate these paths and throw an error if they contain placeholder values.
136
+
137
+ You can run the following command to check your configuration:
138
+
139
+ ```bash
140
+ bash scripts/check.sh
141
+ ```
142
+
143
+ ## Usage
144
+
145
+ ### Unit Test
146
+
147
+ You can run the following command to run the unit test:
148
+
149
+ ```bash
150
+ bash scripts/unittest.sh
151
+ ```
152
+
153
+ ### Examples
154
+
155
+ #### Single Prompt Examples
156
+
157
+ The project includes two single prompt examples:
158
+
159
+ 1. **Regular Mode**: Process a single prompt and display the complete response
160
+ ```bash
161
+ python example/single_prompt/single_prompt.py
162
+ ```
163
+
164
+ 2. **Streaming Mode**: Process a single prompt with real-time streaming output
165
+ ```bash
166
+ python example/single_prompt/single_prompt_stream.py
167
+ ```
168
+
169
+ Both examples accept an optional `--llm` parameter to specify which LLM provider to use:
170
+ ```bash
171
+ python example/single_prompt/single_prompt.py --llm=ollama
172
+ ```
173
+
174
+ > [!NOTE]
175
+ > For more details, see the [Single Prompt Example README](example/single_prompt/README.md).
176
+
177
+ #### Terminal Chatbot Examples
178
+
179
+ The project includes two interactive terminal chatbot examples:
180
+
181
+ 1. **Regular Mode**: Interactive terminal chat with complete responses
182
+ ```bash
183
+ python example/chatbot_terminal/chatbot_terminal.py
184
+ ```
185
+
186
+ 2. **Streaming Mode**: Interactive terminal chat with streaming responses
187
+ ```bash
188
+ python example/chatbot_terminal/chatbot_terminal_stream.py
189
+ ```
190
+
191
+ Both examples accept an optional `--llm` parameter to specify which LLM provider to use:
192
+ ```bash
193
+ python example/chatbot_terminal/chatbot_terminal.py --llm=ollama
194
+ ```
195
+
196
+ > [!NOTE]
197
+ > For more details, see the [Terminal Chatbot Example README](example/chatbot_terminal/README.md).
198
+
199
+ #### Streamlit Web Chatbot Example
200
+
201
+ The project includes an interactive web-based chatbot example using Streamlit:
202
+
203
+ ```bash
204
+ streamlit run example/chatbot_streamlit/app.py
205
+ ```
206
+
207
+ This example features:
208
+ - Interactive chat interface.
209
+ - Real-time streaming responses.
210
+ - Detailed MCP tool workflow visualization.
211
+ - Configurable LLM settings (OpenAI/Ollama) and MCP tool display via the sidebar.
212
+
213
+ ![MCP Chatbot Streamlit Demo](assets/chatbot_streamlit_demo_light.png)
214
+
215
+ > [!NOTE]
216
+ > For more details, see the [Streamlit Chatbot Example README](example/chatbot_streamlit/README.md).
217
+
218
+
219
+
220
+ ## Project Structure
221
+
222
+ - `mcp_chatbot/`: Core library code
223
+ - `chat/`: Chat session management
224
+ - `config/`: Configuration handling
225
+ - `llm/`: LLM client implementation
226
+ - `mcp/`: MCP client and tool integration
227
+ - `utils/`: Utility functions (e.g. `WorkflowTrace` and `StreamPrinter`)
228
+ - `mcp_servers/`: Custom MCP servers implementation
229
+ - `markdown_processor.py`: Server for processing Markdown files
230
+ - `servers_config.json`: Configuration for MCP servers
231
+ - `data-example/`: Example Markdown files for testing
232
+ - `example/`: Example scripts for different use cases
233
+ - `single_prompt/`: Single prompt processing examples (regular and streaming)
234
+ - `chatbot_terminal/`: Interactive terminal chatbot examples (regular and streaming)
235
+ - `chatbot_streamlit/`: Interactive web chatbot example using Streamlit
236
+
237
+ ## Extending the Project
238
+
239
+ You can extend this project by:
240
+
241
+ 1. Adding new MCP servers in the `mcp_servers/` directory
242
+ 2. Updating the `servers_config.json` to include your new servers
243
+ 3. Implementing new functionalities in the existing servers
244
+ 4. Creating new examples based on the provided templates
245
+
246
+ ## Troubleshooting
247
+
248
+ For Windows users, you can take the following `servers_config.json` as an example:
249
+
250
+ ```json
251
+ {
252
+ "mcpServers": {
253
+ "markdown_processor": {
254
+ "command": "C:\\Users\\13430\\.local\\bin\\uv.exe",
255
+ "args": [
256
+ "--directory",
257
+ "C:\\Users\\13430\\mcp_chatbot\\mcp_servers",
258
+ "run",
259
+ "markdown_processor.py"
260
+ ]
261
+ }
262
+ }
263
+ }
264
+ ```
265
+
266
+ - **Path Issues**: Ensure all paths in the configuration files are absolute paths appropriate for your system
267
+ - **MCP Server Errors**: Make sure the tools are properly installed and configured
268
+ - **API Key Errors**: Verify your API key is correctly set in the `.env` file