mcp-use 1.0.3__tar.gz → 1.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic. Click here for more details.

Files changed (66):
  1. mcp_use-1.1.5/.github/ISSUE_TEMPLATE/bug_report.md +38 -0
  2. {mcp_use-1.0.3 → mcp_use-1.1.5}/PKG-INFO +112 -12
  3. {mcp_use-1.0.3 → mcp_use-1.1.5}/README.md +111 -11
  4. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/introduction.mdx +3 -0
  5. mcp_use-1.1.5/docs/logo/dark.svg +7 -0
  6. mcp_use-1.1.5/docs/logo/light.svg +7 -0
  7. mcp_use-1.1.5/docs/quickstart.mdx +234 -0
  8. mcp_use-1.1.5/examples/http_example.py +53 -0
  9. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/agents/langchain_agent.py +3 -3
  10. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/agents/mcpagent.py +7 -7
  11. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/client.py +3 -3
  12. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/connectors/base.py +5 -4
  13. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/connectors/http.py +2 -2
  14. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/connectors/stdio.py +2 -2
  15. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/connectors/websocket.py +6 -6
  16. {mcp_use-1.0.3 → mcp_use-1.1.5}/pyproject.toml +1 -1
  17. mcp_use-1.1.5/static/image.jpg +0 -0
  18. mcp_use-1.0.3/docs/logo/dark.svg +0 -12
  19. mcp_use-1.0.3/docs/logo/light.svg +0 -12
  20. mcp_use-1.0.3/docs/quickstart.mdx +0 -138
  21. mcp_use-1.0.3/static/image.jpg +0 -0
  22. {mcp_use-1.0.3 → mcp_use-1.1.5}/.github/workflows/publish.yml +0 -0
  23. {mcp_use-1.0.3 → mcp_use-1.1.5}/.github/workflows/tests.yml +0 -0
  24. {mcp_use-1.0.3 → mcp_use-1.1.5}/.gitignore +0 -0
  25. {mcp_use-1.0.3 → mcp_use-1.1.5}/.pre-commit-config.yaml +0 -0
  26. {mcp_use-1.0.3 → mcp_use-1.1.5}/LICENSE +0 -0
  27. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/README.md +0 -0
  28. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/api-reference/introduction.mdx +0 -0
  29. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/development.mdx +0 -0
  30. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/docs.json +0 -0
  31. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/essentials/configuration.mdx +0 -0
  32. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/essentials/connection-types.mdx +0 -0
  33. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/essentials/llm-integration.mdx +0 -0
  34. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/favicon.svg +0 -0
  35. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/images/hero-dark.png +0 -0
  36. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/images/hero-light.png +0 -0
  37. {mcp_use-1.0.3 → mcp_use-1.1.5}/docs/snippets/snippet-intro.mdx +0 -0
  38. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/airbnb_mcp.json +0 -0
  39. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/airbnb_use.py +0 -0
  40. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/blender_use.py +0 -0
  41. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/browser_mcp.json +0 -0
  42. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/browser_use.py +0 -0
  43. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/chat_example.py +0 -0
  44. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/filesystem_use.py +0 -0
  45. {mcp_use-1.0.3 → mcp_use-1.1.5}/examples/multi_server_example.py +0 -0
  46. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/__init__.py +0 -0
  47. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/agents/__init__.py +0 -0
  48. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/agents/base.py +0 -0
  49. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/agents/prompts/default.py +0 -0
  50. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/config.py +0 -0
  51. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/connectors/__init__.py +0 -0
  52. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/logging.py +0 -0
  53. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/session.py +0 -0
  54. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/task_managers/__init__.py +0 -0
  55. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/task_managers/base.py +0 -0
  56. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/task_managers/sse.py +0 -0
  57. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/task_managers/stdio.py +0 -0
  58. {mcp_use-1.0.3 → mcp_use-1.1.5}/mcp_use/task_managers/websocket.py +0 -0
  59. {mcp_use-1.0.3 → mcp_use-1.1.5}/pytest.ini +0 -0
  60. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/conftest.py +0 -0
  61. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_client.py +0 -0
  62. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_config.py +0 -0
  63. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_http_connector.py +0 -0
  64. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_logging.py +0 -0
  65. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_session.py +0 -0
  66. {mcp_use-1.0.3 → mcp_use-1.1.5}/tests/unit/test_stdio_connector.py +0 -0
@@ -0,0 +1,38 @@
1
+ ---
2
+ name: Bug report
3
+ about: Create a report to help us improve
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ **Describe the bug**
11
+ A clear and concise description of what the bug is.
12
+
13
+ **To Reproduce**
14
+ Steps to reproduce the behavior:
15
+ 1. Go to '...'
16
+ 2. Click on '....'
17
+ 3. Scroll down to '....'
18
+ 4. See error
19
+
20
+ **Expected behavior**
21
+ A clear and concise description of what you expected to happen.
22
+
23
+ **Screenshots**
24
+ If applicable, add screenshots to help explain your problem.
25
+
26
+ **Desktop (please complete the following information):**
27
+ - OS: [e.g. iOS]
28
+ - Browser [e.g. chrome, safari]
29
+ - Version [e.g. 22]
30
+
31
+ **Smartphone (please complete the following information):**
32
+ - Device: [e.g. iPhone6]
33
+ - OS: [e.g. iOS8.1]
34
+ - Browser [e.g. stock browser, safari]
35
+ - Version [e.g. 22]
36
+
37
+ **Additional context**
38
+ Add any other context about the problem here.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mcp-use
3
- Version: 1.0.3
3
+ Version: 1.1.5
4
4
  Summary: MCP Library for LLMs
5
5
  Author-email: Pietro Zullo <pietro.zullo@gmail.com>
6
6
  License: MIT
@@ -56,6 +56,19 @@ Description-Content-Type: text/markdown
56
56
 
57
57
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
58
58
 
59
+ # Features
60
+
61
+ ## ✨ Key Features
62
+
63
+ | Feature | Description |
64
+ |---------|-------------|
65
+ | 🔄 **Ease of use** | Create your first MCP capable agent you need only 6 lines of code |
66
+ | 🤖 **LLM Flexibility** | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
67
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
68
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
69
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
70
+
71
+
59
72
  # Quick start
60
73
 
61
74
  With pip:
@@ -72,7 +85,30 @@ cd mcp-use
72
85
  pip install -e .
73
86
  ```
74
87
 
75
- Spin up your agent:
88
+ ### Installing LangChain Providers
89
+
90
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
91
+
92
+ ```bash
93
+ # For OpenAI
94
+ pip install langchain-openai
95
+
96
+ # For Anthropic
97
+ pip install langchain-anthropic
98
+
99
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
100
+ ```
101
+
102
+ and add your API keys for the provider you want to use to your `.env` file.
103
+
104
+ ```bash
105
+ OPENAI_API_KEY=
106
+ ANTHROPIC_API_KEY=
107
+ ```
108
+
109
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
110
+
111
+ ### Spin up your agent:
76
112
 
77
113
  ```python
78
114
  import asyncio
@@ -85,8 +121,21 @@ async def main():
85
121
  # Load environment variables
86
122
  load_dotenv()
87
123
 
88
- # Create MCPClient from config file
89
- client = MCPClient.from_config_file("browser_mcp.json")
124
+ # Create configuration dictionary
125
+ config = {
126
+ "mcpServers": {
127
+ "playwright": {
128
+ "command": "npx",
129
+ "args": ["@playwright/mcp@latest"],
130
+ "env": {
131
+ "DISPLAY": ":1"
132
+ }
133
+ }
134
+ }
135
+ }
136
+
137
+ # Create MCPClient from configuration dictionary
138
+ client = MCPClient.from_dict(config)
90
139
 
91
140
  # Create LLM
92
141
  llm = ChatOpenAI(model="gpt-4o")
@@ -96,7 +145,7 @@ async def main():
96
145
 
97
146
  # Run the query
98
147
  result = await agent.run(
99
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
148
+ "Find the best restaurant in San Francisco",
100
149
  )
101
150
  print(f"\nResult: {result}")
102
151
 
@@ -104,6 +153,14 @@ if __name__ == "__main__":
104
153
  asyncio.run(main())
105
154
  ```
106
155
 
156
+ You can also add the servers configuration from a config file like this:
157
+
158
+ ```python
159
+ client = MCPClient.from_config_file(
160
+ os.path.join("browser_mcp.json")
161
+ )
162
+ ```
163
+
107
164
  Example configuration file (`browser_mcp.json`):
108
165
 
109
166
  ```json
@@ -120,15 +177,9 @@ Example configuration file (`browser_mcp.json`):
120
177
  }
121
178
  ```
122
179
 
123
- Add your API keys for the provider you want to use to your `.env` file.
124
-
125
- ```bash
126
- OPENAI_API_KEY=
127
- ANTHROPIC_API_KEY=
128
- ```
129
-
130
180
  For other settings, models, and more, check out the documentation.
131
181
 
182
+
132
183
  # Example Use Cases
133
184
 
134
185
  ## Web Browsing with Playwright
@@ -286,6 +337,55 @@ if __name__ == "__main__":
286
337
  asyncio.run(main())
287
338
  ```
288
339
 
340
+ ## HTTP Connection Example
341
+
342
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
343
+
344
+ Here's an example of how to use the HTTP connection feature:
345
+
346
+ ```python
347
+ import asyncio
348
+ import os
349
+ from dotenv import load_dotenv
350
+ from langchain_openai import ChatOpenAI
351
+ from mcp_use import MCPAgent, MCPClient
352
+
353
+ async def main():
354
+ """Run the example using a configuration file."""
355
+ # Load environment variables
356
+ load_dotenv()
357
+
358
+ config = {
359
+ "mcpServers": {
360
+ "http": {
361
+ "url": "http://localhost:8931/sse"
362
+ }
363
+ }
364
+ }
365
+
366
+ # Create MCPClient from config file
367
+ client = MCPClient.from_dict(config)
368
+
369
+ # Create LLM
370
+ llm = ChatOpenAI(model="gpt-4o")
371
+
372
+ # Create agent with the client
373
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
374
+
375
+ # Run the query
376
+ result = await agent.run(
377
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
378
+ max_steps=30,
379
+ )
380
+ print(f"\nResult: {result}")
381
+
382
+ if __name__ == "__main__":
383
+ # Run the appropriate example
384
+ asyncio.run(main())
385
+ ```
386
+
387
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
388
+
289
389
  # Multi-Server Support
290
390
 
291
391
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -17,6 +17,19 @@
17
17
 
18
18
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
19
19
 
20
+ # Features
21
+
22
+ ## ✨ Key Features
23
+
24
+ | Feature | Description |
25
+ |---------|-------------|
26
+ | 🔄 **Ease of use** | Create your first MCP capable agent you need only 6 lines of code |
27
+ | 🤖 **LLM Flexibility** | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
28
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
29
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
30
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
31
+
32
+
20
33
  # Quick start
21
34
 
22
35
  With pip:
@@ -33,7 +46,30 @@ cd mcp-use
33
46
  pip install -e .
34
47
  ```
35
48
 
36
- Spin up your agent:
49
+ ### Installing LangChain Providers
50
+
51
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
52
+
53
+ ```bash
54
+ # For OpenAI
55
+ pip install langchain-openai
56
+
57
+ # For Anthropic
58
+ pip install langchain-anthropic
59
+
60
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
61
+ ```
62
+
63
+ and add your API keys for the provider you want to use to your `.env` file.
64
+
65
+ ```bash
66
+ OPENAI_API_KEY=
67
+ ANTHROPIC_API_KEY=
68
+ ```
69
+
70
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
71
+
72
+ ### Spin up your agent:
37
73
 
38
74
  ```python
39
75
  import asyncio
@@ -46,8 +82,21 @@ async def main():
46
82
  # Load environment variables
47
83
  load_dotenv()
48
84
 
49
- # Create MCPClient from config file
50
- client = MCPClient.from_config_file("browser_mcp.json")
85
+ # Create configuration dictionary
86
+ config = {
87
+ "mcpServers": {
88
+ "playwright": {
89
+ "command": "npx",
90
+ "args": ["@playwright/mcp@latest"],
91
+ "env": {
92
+ "DISPLAY": ":1"
93
+ }
94
+ }
95
+ }
96
+ }
97
+
98
+ # Create MCPClient from configuration dictionary
99
+ client = MCPClient.from_dict(config)
51
100
 
52
101
  # Create LLM
53
102
  llm = ChatOpenAI(model="gpt-4o")
@@ -57,7 +106,7 @@ async def main():
57
106
 
58
107
  # Run the query
59
108
  result = await agent.run(
60
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
109
+ "Find the best restaurant in San Francisco",
61
110
  )
62
111
  print(f"\nResult: {result}")
63
112
 
@@ -65,6 +114,14 @@ if __name__ == "__main__":
65
114
  asyncio.run(main())
66
115
  ```
67
116
 
117
+ You can also add the servers configuration from a config file like this:
118
+
119
+ ```python
120
+ client = MCPClient.from_config_file(
121
+ os.path.join("browser_mcp.json")
122
+ )
123
+ ```
124
+
68
125
  Example configuration file (`browser_mcp.json`):
69
126
 
70
127
  ```json
@@ -81,15 +138,9 @@ Example configuration file (`browser_mcp.json`):
81
138
  }
82
139
  ```
83
140
 
84
- Add your API keys for the provider you want to use to your `.env` file.
85
-
86
- ```bash
87
- OPENAI_API_KEY=
88
- ANTHROPIC_API_KEY=
89
- ```
90
-
91
141
  For other settings, models, and more, check out the documentation.
92
142
 
143
+
93
144
  # Example Use Cases
94
145
 
95
146
  ## Web Browsing with Playwright
@@ -247,6 +298,55 @@ if __name__ == "__main__":
247
298
  asyncio.run(main())
248
299
  ```
249
300
 
301
+ ## HTTP Connection Example
302
+
303
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
304
+
305
+ Here's an example of how to use the HTTP connection feature:
306
+
307
+ ```python
308
+ import asyncio
309
+ import os
310
+ from dotenv import load_dotenv
311
+ from langchain_openai import ChatOpenAI
312
+ from mcp_use import MCPAgent, MCPClient
313
+
314
+ async def main():
315
+ """Run the example using a configuration file."""
316
+ # Load environment variables
317
+ load_dotenv()
318
+
319
+ config = {
320
+ "mcpServers": {
321
+ "http": {
322
+ "url": "http://localhost:8931/sse"
323
+ }
324
+ }
325
+ }
326
+
327
+ # Create MCPClient from config file
328
+ client = MCPClient.from_dict(config)
329
+
330
+ # Create LLM
331
+ llm = ChatOpenAI(model="gpt-4o")
332
+
333
+ # Create agent with the client
334
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
335
+
336
+ # Run the query
337
+ result = await agent.run(
338
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
339
+ max_steps=30,
340
+ )
341
+ print(f"\nResult: {result}")
342
+
343
+ if __name__ == "__main__":
344
+ # Run the appropriate example
345
+ asyncio.run(main())
346
+ ```
347
+
348
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
349
+
250
350
  # Multi-Server Support
251
351
 
252
352
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -25,6 +25,9 @@ mcp_use is an open source library that enables developers to connect any Languag
25
25
  <Card title="Universal LLM Support" icon="robot" href="/essentials/llm-integration">
26
26
  Compatible with any LangChain-supported LLM provider
27
27
  </Card>
28
+ <Card title="HTTP Connection" icon="network" href="/quickstart">
29
+ Connect to MCP servers running on specific HTTP ports for web-based integrations
30
+ </Card>
28
31
  </CardGroup>
29
32
 
30
33
  ## Getting Started
@@ -0,0 +1,7 @@
1
+ <svg width="303" height="303" viewBox="0 0 303 303" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <path d="M106.066 106.066C86.5398 125.592 54.8816 125.592 35.3554 106.066V106.066C15.8291 86.5397 15.8291 54.8815 35.3554 35.3552V35.3552C54.8816 15.829 86.5398 15.829 106.066 35.3552V35.3552C125.592 54.8815 125.592 86.5397 106.066 106.066V106.066Z" fill="white"/>
3
+ <path d="M267.286 267.286C247.76 286.812 216.102 286.812 196.576 267.286V267.286C177.049 247.76 177.049 216.102 196.576 196.576V196.576C216.102 177.049 247.76 177.049 267.286 196.576V196.576C286.813 216.102 286.813 247.76 267.286 267.286V267.286Z" fill="white"/>
4
+ <path fill-rule="evenodd" clip-rule="evenodd" d="M181.957 230.04L211.425 259.508L260.922 210.011L232.851 181.94C204.215 181.726 175.645 170.695 153.796 148.846C131.947 126.997 120.915 98.4264 120.702 69.7903L92.631 41.7193L43.1335 91.2168L72.6014 120.685C100.313 121.56 127.765 132.573 148.917 153.725C170.069 174.877 181.082 202.328 181.957 230.04Z" fill="white"/>
5
+ <circle cx="70.3209" cy="232.321" r="50" fill="white"/>
6
+ <circle cx="232.321" cy="70.3209" r="50" fill="white"/>
7
+ </svg>
@@ -0,0 +1,7 @@
1
+ <svg width="303" height="303" viewBox="0 0 303 303" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <path d="M106.066 106.066C86.5398 125.592 54.8816 125.592 35.3554 106.066V106.066C15.8291 86.5397 15.8291 54.8815 35.3554 35.3552V35.3552C54.8816 15.829 86.5398 15.829 106.066 35.3552V35.3552C125.592 54.8815 125.592 86.5397 106.066 106.066V106.066Z" fill="black"/>
3
+ <path d="M267.286 267.286C247.76 286.812 216.102 286.812 196.576 267.286V267.286C177.049 247.76 177.049 216.102 196.576 196.576V196.576C216.102 177.049 247.76 177.049 267.286 196.576V196.576C286.813 216.102 286.813 247.76 267.286 267.286V267.286Z" fill="black"/>
4
+ <path fill-rule="evenodd" clip-rule="evenodd" d="M181.957 230.04L211.425 259.508L260.922 210.011L232.851 181.94C204.215 181.726 175.645 170.695 153.796 148.846C131.947 126.997 120.915 98.4264 120.702 69.7903L92.631 41.7193L43.1335 91.2168L72.6014 120.685C100.313 121.56 127.765 132.573 148.917 153.725C170.069 174.877 181.082 202.328 181.957 230.04Z" fill="black"/>
5
+ <circle cx="70.3209" cy="232.321" r="50" fill="black"/>
6
+ <circle cx="232.321" cy="70.3209" r="50" fill="black"/>
7
+ </svg>
@@ -0,0 +1,234 @@
1
+ ---
2
+ title: Quickstart
3
+ description: "Get started with mcp_use in minutes"
4
+ ---
5
+
6
+ # Quickstart Guide
7
+
8
+ This guide will help you get started with mcp_use quickly. We'll cover installation, basic configuration, and running your first agent.
9
+
10
+ ## Installation
11
+
12
+ You can install mcp_use using pip:
13
+
14
+ ```bash
15
+ pip install mcp-use
16
+ ```
17
+
18
+ Or install from source:
19
+
20
+ ```bash
21
+ git clone https://github.com/pietrozullo/mcp-use.git
22
+ cd mcp-use
23
+ pip install -e .
24
+ ```
25
+
26
+ ## Installing LangChain Providers
27
+
28
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
29
+
30
+ ```bash
31
+ # For OpenAI
32
+ pip install langchain-openai
33
+
34
+ # For Anthropic
35
+ pip install langchain-anthropic
36
+
37
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
38
+ ```
39
+
40
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
41
+
42
+ ## Environment Setup
43
+
44
+ Set up your environment variables in a `.env` file:
45
+
46
+ ```bash
47
+ OPENAI_API_KEY=your_api_key_here
48
+ ANTHROPIC_API_KEY=your_api_key_here
49
+ ```
50
+
51
+ ## Your First Agent
52
+
53
+ Here's a simple example to get you started:
54
+
55
+ ```python
56
+ import asyncio
57
+ import os
58
+ from dotenv import load_dotenv
59
+ from langchain_openai import ChatOpenAI
60
+ from mcp_use import MCPAgent, MCPClient
61
+
62
+ async def main():
63
+ # Load environment variables
64
+ load_dotenv()
65
+
66
+ # Create configuration dictionary
67
+ config = {
68
+ "mcpServers": {
69
+ "playwright": {
70
+ "command": "npx",
71
+ "args": ["@playwright/mcp@latest"],
72
+ "env": {
73
+ "DISPLAY": ":1"
74
+ }
75
+ }
76
+ }
77
+ }
78
+
79
+ # Create MCPClient from configuration dictionary
80
+ client = MCPClient.from_dict(config)
81
+
82
+ # Create LLM
83
+ llm = ChatOpenAI(model="gpt-4o")
84
+
85
+ # Create agent with the client
86
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
87
+
88
+ # Run the query
89
+ result = await agent.run(
90
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
91
+ )
92
+ print(f"\nResult: {result}")
93
+
94
+ if __name__ == "__main__":
95
+ asyncio.run(main())
96
+ ```
97
+
98
+ ## Configuration Options
99
+
100
+ You can also add the servers configuration from a config file:
101
+
102
+ ```python
103
+ client = MCPClient.from_config_file(
104
+ os.path.join("browser_mcp.json")
105
+ )
106
+ ```
107
+
108
+ Example configuration file (`browser_mcp.json`):
109
+
110
+ ```json
111
+ {
112
+ "mcpServers": {
113
+ "playwright": {
114
+ "command": "npx",
115
+ "args": ["@playwright/mcp@latest"],
116
+ "env": {
117
+ "DISPLAY": ":1"
118
+ }
119
+ }
120
+ }
121
+ }
122
+ ```
123
+
124
+ ## Restricting Tool Access
125
+
126
+ You can control which tools are available to the agent:
127
+
128
+ ```python
129
+ import asyncio
130
+ import os
131
+ from dotenv import load_dotenv
132
+ from langchain_openai import ChatOpenAI
133
+ from mcp_use import MCPAgent, MCPClient
134
+
135
+ async def main():
136
+ # Load environment variables
137
+ load_dotenv()
138
+
139
+ # Create configuration dictionary
140
+ config = {
141
+ "mcpServers": {
142
+ "playwright": {
143
+ "command": "npx",
144
+ "args": ["@playwright/mcp@latest"],
145
+ "env": {
146
+ "DISPLAY": ":1"
147
+ }
148
+ }
149
+ }
150
+ }
151
+
152
+ # Create MCPClient from configuration dictionary
153
+ client = MCPClient.from_dict(config)
154
+
155
+ # Create LLM
156
+ llm = ChatOpenAI(model="gpt-4o")
157
+
158
+ # Create agent with restricted tools
159
+ agent = MCPAgent(
160
+ llm=llm,
161
+ client=client,
162
+ max_steps=30,
163
+ disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
164
+ )
165
+
166
+ # Run the query
167
+ result = await agent.run(
168
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
169
+ )
170
+ print(f"\nResult: {result}")
171
+
172
+ if __name__ == "__main__":
173
+ asyncio.run(main())
174
+ ```
175
+
176
+ ## Available MCP Servers
177
+
178
+ mcp_use supports any MCP server, allowing you to connect to a wide range of server implementations. For a comprehensive list of available servers, check out the [awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers) repository.
179
+
180
+
181
+ Each server requires its own configuration. Check the [Configuration Guide](/essentials/configuration) for details.
182
+
183
+ ## HTTP Connection
184
+
185
+ mcp_use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
186
+
187
+ Here's a simple example to get you started with HTTP connections:
188
+
189
+ ```python
190
+ import asyncio
191
+ import os
192
+ from dotenv import load_dotenv
193
+ from langchain_openai import ChatOpenAI
194
+ from mcp_use import MCPAgent, MCPClient
195
+
196
+ async def main():
197
+ # Load environment variables
198
+ load_dotenv()
199
+
200
+ # Create configuration dictionary
201
+ config = {
202
+ "mcpServers": {
203
+ "http": {
204
+ "url": "http://localhost:8931/sse"
205
+ }
206
+ }
207
+ }
208
+
209
+ # Create MCPClient from configuration dictionary
210
+ client = MCPClient.from_dict(config)
211
+
212
+ # Create LLM
213
+ llm = ChatOpenAI(model="gpt-4o")
214
+
215
+ # Create agent with the client
216
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
217
+
218
+ # Run the query
219
+ result = await agent.run(
220
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
221
+ )
222
+ print(f"\nResult: {result}")
223
+
224
+ if __name__ == "__main__":
225
+ asyncio.run(main())
226
+ ```
227
+
228
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
229
+
230
+ ## Next Steps
231
+
232
+ - Learn about [Configuration Options](/essentials/configuration)
233
+ - Explore [Example Use Cases](/examples)
234
+ - Check out [Advanced Features](/essentials/advanced)
@@ -0,0 +1,53 @@
1
+ """
2
+ HTTP Example for mcp_use.
3
+
4
+ This example demonstrates how to use the mcp_use library with MCPClient
5
+ to connect to an MCP server running on a specific HTTP port.
6
+
7
+ Before running this example, you need to start the Playwright MCP server
8
+ in another terminal with:
9
+
10
+ npx @playwright/mcp@latest --port 8931
11
+
12
+ This will start the server on port 8931. Resulting in the config you find below.
13
+ Of course you can run this with any server you want at any URL.
14
+
15
+ Special thanks to https://github.com/microsoft/playwright-mcp for the server.
16
+
17
+ """
18
+
19
+ import asyncio
20
+
21
+ from dotenv import load_dotenv
22
+ from langchain_openai import ChatOpenAI
23
+
24
+ from mcp_use import MCPAgent, MCPClient
25
+
26
+
27
+ async def main():
28
+ """Run the example using a configuration file."""
29
+ # Load environment variables
30
+ load_dotenv()
31
+
32
+ config = {"mcpServers": {"http": {"url": "http://localhost:8931/sse"}}}
33
+
34
+ # Create MCPClient from config file
35
+ client = MCPClient.from_dict(config)
36
+
37
+ # Create LLM
38
+ llm = ChatOpenAI(model="gpt-4o")
39
+
40
+ # Create agent with the client
41
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
42
+
43
+ # Run the query
44
+ result = await agent.run(
45
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
46
+ max_steps=30,
47
+ )
48
+ print(f"\nResult: {result}")
49
+
50
+
51
+ if __name__ == "__main__":
52
+ # Run the appropriate example
53
+ asyncio.run(main())
@@ -112,7 +112,7 @@ class LangChainAgent:
112
112
  # Recreate the agent with the new system message if it exists
113
113
  if self.agent and self.tools:
114
114
  self.agent = self._create_agent()
115
- logger.info("Agent recreated with new system message")
115
+ logger.debug("Agent recreated with new system message")
116
116
 
117
117
  async def initialize(self) -> None:
118
118
  """Initialize the agent and its tools."""
@@ -181,7 +181,7 @@ class LangChainAgent:
181
181
  Raises:
182
182
  ToolException: If tool execution fails.
183
183
  """
184
- logger.info(f'MCP tool: "{self.name}" received input: {kwargs}')
184
+ logger.debug(f'MCP tool: "{self.name}" received input: {kwargs}')
185
185
 
186
186
  try:
187
187
  tool_result: CallToolResult = await self.connector.call_tool(
@@ -207,7 +207,7 @@ class LangChainAgent:
207
207
  tools.append(McpToLangChainAdapter())
208
208
 
209
209
  # Log available tools for debugging
210
- logger.info(f"Available tools: {[tool.name for tool in tools]}")
210
+ logger.debug(f"Available tools: {[tool.name for tool in tools]}")
211
211
  return tools
212
212
 
213
213
  def _create_agent(self) -> AgentExecutor:
@@ -209,7 +209,7 @@ class MCPAgent:
209
209
  # If the agent is already initialized, we need to reinitialize it
210
210
  # to apply the changes to the available tools
211
211
  if self._initialized:
212
- logger.info(
212
+ logger.debug(
213
213
  "Agent already initialized. Changes will take effect on next initialization."
214
214
  )
215
215
  # We don't automatically reinitialize here as it could be disruptive
@@ -254,11 +254,11 @@ class MCPAgent:
254
254
  try:
255
255
  # Initialize if needed
256
256
  if manage_connector and (not self._initialized or not self._agent):
257
- logger.info("Initializing agent before running query")
257
+ logger.debug("Initializing agent before running query")
258
258
  await self.initialize()
259
259
  initialized_here = True
260
260
  elif not self._initialized and self.auto_initialize:
261
- logger.info("Auto-initializing agent before running query")
261
+ logger.debug("Auto-initializing agent before running query")
262
262
  await self.initialize()
263
263
  initialized_here = True
264
264
 
@@ -289,7 +289,7 @@ class MCPAgent:
289
289
  # Other message types can be handled here if needed
290
290
 
291
291
  # Run the query with the specified max_steps or default
292
- logger.info(f"Running query with max_steps={max_steps or self.max_steps}")
292
+ logger.debug(f"Running query with max_steps={max_steps or self.max_steps}")
293
293
  result = await self._agent.run(
294
294
  query=query,
295
295
  max_steps=max_steps,
@@ -307,7 +307,7 @@ class MCPAgent:
307
307
  # If we initialized in this method and there was an error,
308
308
  # make sure to clean up
309
309
  if initialized_here and manage_connector:
310
- logger.info("Cleaning up resources after initialization error")
310
+ logger.debug("Cleaning up resources after initialization error")
311
311
  await self.close()
312
312
  raise
313
313
 
@@ -315,7 +315,7 @@ class MCPAgent:
315
315
  # Clean up resources if we're managing the connector and
316
316
  # we're not using a client that manages sessions
317
317
  if manage_connector and not self.client and not initialized_here:
318
- logger.info("Closing agent after query completion")
318
+ logger.debug("Closing agent after query completion")
319
319
  await self.close()
320
320
 
321
321
  async def close(self) -> None:
@@ -338,7 +338,7 @@ class MCPAgent:
338
338
  await connector.disconnect()
339
339
 
340
340
  self._initialized = False
341
- logger.info("Agent closed successfully")
341
+ logger.debug("Agent closed successfully")
342
342
 
343
343
  except Exception as e:
344
344
  logger.error(f"Error during agent closure: {e}")
@@ -214,7 +214,7 @@ class MCPClient:
214
214
 
215
215
  try:
216
216
  # Disconnect from the session
217
- logger.info(f"Closing session for server '{server_name}'")
217
+ logger.debug(f"Closing session for server '{server_name}'")
218
218
  await session.disconnect()
219
219
  except Exception as e:
220
220
  logger.error(f"Error closing session for server '{server_name}': {e}")
@@ -237,7 +237,7 @@ class MCPClient:
237
237
 
238
238
  for server_name in server_names:
239
239
  try:
240
- logger.info(f"Closing session for server '{server_name}'")
240
+ logger.debug(f"Closing session for server '{server_name}'")
241
241
  await self.close_session(server_name)
242
242
  except Exception as e:
243
243
  error_msg = f"Failed to close session for server '{server_name}': {e}"
@@ -248,4 +248,4 @@ class MCPClient:
248
248
  if errors:
249
249
  logger.error(f"Encountered {len(errors)} errors while closing sessions")
250
250
  else:
251
- logger.info("All sessions closed successfully")
251
+ logger.debug("All sessions closed successfully")
@@ -39,10 +39,10 @@ class BaseConnector(ABC):
39
39
  logger.debug("Not connected to MCP implementation")
40
40
  return
41
41
 
42
- logger.info("Disconnecting from MCP implementation")
42
+ logger.debug("Disconnecting from MCP implementation")
43
43
  await self._cleanup_resources()
44
44
  self._connected = False
45
- logger.info("Disconnected from MCP implementation")
45
+ logger.debug("Disconnected from MCP implementation")
46
46
 
47
47
  async def _cleanup_resources(self) -> None:
48
48
  """Clean up all resources associated with this connector."""
@@ -83,7 +83,7 @@ class BaseConnector(ABC):
83
83
  if not self.client:
84
84
  raise RuntimeError("MCP client is not connected")
85
85
 
86
- logger.info("Initializing MCP session")
86
+ logger.debug("Initializing MCP session")
87
87
 
88
88
  # Initialize the session
89
89
  result = await self.client.initialize()
@@ -92,7 +92,7 @@ class BaseConnector(ABC):
92
92
  tools_result = await self.client.list_tools()
93
93
  self._tools = tools_result.tools
94
94
 
95
- logger.info(f"MCP session initialized with {len(self._tools)} tools")
95
+ logger.debug(f"MCP session initialized with {len(self._tools)} tools")
96
96
 
97
97
  return result
98
98
 
@@ -110,6 +110,7 @@ class BaseConnector(ABC):
110
110
 
111
111
  logger.debug(f"Calling tool '{name}' with arguments: {arguments}")
112
112
  result = await self.client.call_tool(name, arguments)
113
+ logger.debug(f"Tool '{name}' called with result: {result}")
113
114
  return result
114
115
 
115
116
  async def list_resources(self) -> list[dict[str, Any]]:
@@ -51,7 +51,7 @@ class HttpConnector(BaseConnector):
51
51
  logger.debug("Already connected to MCP implementation")
52
52
  return
53
53
 
54
- logger.info(f"Connecting to MCP implementation via HTTP/SSE: {self.base_url}")
54
+ logger.debug(f"Connecting to MCP implementation via HTTP/SSE: {self.base_url}")
55
55
  try:
56
56
  # Create the SSE connection URL
57
57
  sse_url = f"{self.base_url}"
@@ -68,7 +68,7 @@ class HttpConnector(BaseConnector):
68
68
 
69
69
  # Mark as connected
70
70
  self._connected = True
71
- logger.info(
71
+ logger.debug(
72
72
  f"Successfully connected to MCP implementation via HTTP/SSE: {self.base_url}"
73
73
  )
74
74
 
@@ -49,7 +49,7 @@ class StdioConnector(BaseConnector):
49
49
  logger.debug("Already connected to MCP implementation")
50
50
  return
51
51
 
52
- logger.info(f"Connecting to MCP implementation: {self.command}")
52
+ logger.debug(f"Connecting to MCP implementation: {self.command}")
53
53
  try:
54
54
  # Create server parameters
55
55
  server_params = StdioServerParameters(
@@ -66,7 +66,7 @@ class StdioConnector(BaseConnector):
66
66
 
67
67
  # Mark as connected
68
68
  self._connected = True
69
- logger.info(f"Successfully connected to MCP implementation: {self.command}")
69
+ logger.debug(f"Successfully connected to MCP implementation: {self.command}")
70
70
 
71
71
  except Exception as e:
72
72
  logger.error(f"Failed to connect to MCP implementation: {e}")
@@ -57,7 +57,7 @@ class WebSocketConnector(BaseConnector):
57
57
  logger.debug("Already connected to MCP implementation")
58
58
  return
59
59
 
60
- logger.info(f"Connecting to MCP implementation via WebSocket: {self.url}")
60
+ logger.debug(f"Connecting to MCP implementation via WebSocket: {self.url}")
61
61
  try:
62
62
  # Create and start the connection manager
63
63
  self._connection_manager = WebSocketConnectionManager(self.url, self.headers)
@@ -70,7 +70,7 @@ class WebSocketConnector(BaseConnector):
70
70
 
71
71
  # Mark as connected
72
72
  self._connected = True
73
- logger.info(f"Successfully connected to MCP implementation via WebSocket: {self.url}")
73
+ logger.debug(f"Successfully connected to MCP implementation via WebSocket: {self.url}")
74
74
 
75
75
  except Exception as e:
76
76
  logger.error(f"Failed to connect to MCP implementation via WebSocket: {e}")
@@ -117,10 +117,10 @@ class WebSocketConnector(BaseConnector):
117
117
  logger.debug("Not connected to MCP implementation")
118
118
  return
119
119
 
120
- logger.info("Disconnecting from MCP implementation")
120
+ logger.debug("Disconnecting from MCP implementation")
121
121
  await self._cleanup_resources()
122
122
  self._connected = False
123
- logger.info("Disconnected from MCP implementation")
123
+ logger.debug("Disconnected from MCP implementation")
124
124
 
125
125
  async def _cleanup_resources(self) -> None:
126
126
  """Clean up all resources associated with this connector."""
@@ -199,14 +199,14 @@ class WebSocketConnector(BaseConnector):
199
199
 
200
200
  async def initialize(self) -> dict[str, Any]:
201
201
  """Initialize the MCP session and return session information."""
202
- logger.info("Initializing MCP session")
202
+ logger.debug("Initializing MCP session")
203
203
  result = await self._send_request("initialize")
204
204
 
205
205
  # Get available tools
206
206
  tools_result = await self.list_tools()
207
207
  self._tools = [Tool(**tool) for tool in tools_result]
208
208
 
209
- logger.info(f"MCP session initialized with {len(self._tools)} tools")
209
+ logger.debug(f"MCP session initialized with {len(self._tools)} tools")
210
210
  return result
211
211
 
212
212
  async def list_tools(self) -> list[dict[str, Any]]:
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "mcp-use"
3
- version = "1.0.3"
3
+ version = "1.1.5"
4
4
  description = "MCP Library for LLMs"
5
5
  authors = [
6
6
  {name = "Pietro Zullo", email = "pietro.zullo@gmail.com"}
Binary file
@@ -1,12 +0,0 @@
1
- <svg viewBox="0 0 200 60" xmlns="http://www.w3.org/2000/svg">
2
- <style>
3
- .logo-text {
4
- font-family: 'Nunito', sans-serif;
5
- font-size: 40px;
6
- font-weight: 800;
7
- fill: #ffffff;
8
- }
9
- </style>
10
- <rect width="100%" height="100%" fill="none"/>
11
- <text x="0" y="42" class="logo-text">mcp_use</text>
12
- </svg>
@@ -1,12 +0,0 @@
1
- <svg viewBox="0 0 200 60" xmlns="http://www.w3.org/2000/svg">
2
- <style>
3
- .logo-text {
4
- font-family: 'Nunito', sans-serif;
5
- font-size: 40px;
6
- font-weight: 800;
7
- fill: #000000;
8
- }
9
- </style>
10
- <rect width="100%" height="100%" fill="none"/>
11
- <text x="0" y="42" class="logo-text">mcp_use</text>
12
- </svg>
@@ -1,138 +0,0 @@
1
- ---
2
- title: Quickstart
3
- description: "Get started with mcp_use in minutes"
4
- ---
5
-
6
- # Quickstart Guide
7
-
8
- This guide will help you get started with mcp_use quickly. We'll cover installation, basic configuration, and running your first agent.
9
-
10
- ## Installation
11
-
12
- You can install mcp_use using pip:
13
-
14
- ```bash
15
- pip install mcp-use
16
- ```
17
-
18
- Or install from source:
19
-
20
- ```bash
21
- git clone https://github.com/pietrozullo/mcp-use.git
22
- cd mcp-use
23
- pip install -e .
24
- ```
25
-
26
- ## Basic Setup
27
-
28
- 1. Create a configuration file (e.g., `browser_mcp.json`):
29
-
30
- ```json
31
- {
32
- "mcpServers": {
33
- "playwright": {
34
- "command": "npx",
35
- "args": ["@playwright/mcp@latest"],
36
- "env": {
37
- "DISPLAY": ":1"
38
- }
39
- }
40
- }
41
- }
42
- ```
43
-
44
- 2. Set up your environment variables in a `.env` file:
45
-
46
- ```bash
47
- OPENAI_API_KEY=your_api_key_here
48
- ANTHROPIC_API_KEY=your_api_key_here
49
- ```
50
-
51
- ## Your First Agent
52
-
53
- Here's a simple example to get you started:
54
-
55
- ```python
56
- import asyncio
57
- import os
58
- from dotenv import load_dotenv
59
- from langchain_openai import ChatOpenAI
60
- from mcp_use import MCPAgent, MCPClient
61
-
62
- async def main():
63
- # Load environment variables
64
- load_dotenv()
65
-
66
- # Create MCPClient from config file
67
- client = MCPClient.from_config_file("browser_mcp.json")
68
-
69
- # Create LLM
70
- llm = ChatOpenAI(model="gpt-4o")
71
-
72
- # Create agent with the client
73
- agent = MCPAgent(llm=llm, client=client, max_steps=30)
74
-
75
- # Run the query
76
- result = await agent.run(
77
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
78
- )
79
- print(f"\nResult: {result}")
80
-
81
- if __name__ == "__main__":
82
- asyncio.run(main())
83
- ```
84
-
85
- ## Restricting Tool Access
86
-
87
- You can control which tools are available to the agent:
88
-
89
- ```python
90
- import asyncio
91
- import os
92
- from dotenv import load_dotenv
93
- from langchain_openai import ChatOpenAI
94
- from mcp_use import MCPAgent, MCPClient
95
-
96
- async def main():
97
- # Load environment variables
98
- load_dotenv()
99
-
100
- # Create MCPClient from config file
101
- client = MCPClient.from_config_file("browser_mcp.json")
102
-
103
- # Create LLM
104
- llm = ChatOpenAI(model="gpt-4o")
105
-
106
- # Create agent with restricted tools
107
- agent = MCPAgent(
108
- llm=llm,
109
- client=client,
110
- max_steps=30,
111
- disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
112
- )
113
-
114
- # Run the query
115
- result = await agent.run(
116
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
117
- )
118
- print(f"\nResult: {result}")
119
-
120
- if __name__ == "__main__":
121
- asyncio.run(main())
122
- ```
123
-
124
- ## Available MCP Servers
125
-
126
- mcp_use supports various MCP servers:
127
-
128
- - **Playwright**: For web browsing and automation
129
- - **Airbnb**: For property search and booking
130
- - **Blender**: For 3D modeling and animation
131
-
132
- Each server requires its own configuration. Check the [Configuration Guide](/essentials/configuration) for details.
133
-
134
- ## Next Steps
135
-
136
- - Learn about [Configuration Options](/essentials/configuration)
137
- - Explore [Example Use Cases](/examples)
138
- - Check out [Advanced Features](/essentials/advanced)
Binary file
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes