mcp_use-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic.

mcp_use/connectors/websocket.py ADDED
@@ -0,0 +1,142 @@
+ """
+ WebSocket connector for MCP implementations.
+
+ This module provides a connector for communicating with MCP implementations
+ through WebSocket connections.
+ """
+
+ import asyncio
+ import json
+ import uuid
+ from typing import Any
+
+ import websockets
+ from websockets.client import WebSocketClientProtocol
+
+ from .base import BaseConnector
+
+
+ class WebSocketConnector(BaseConnector):
+     """Connector for MCP implementations using WebSocket transport.
+
+     This connector uses WebSockets to communicate with remote MCP implementations.
+     """
+
+     def __init__(
+         self, url: str, auth_token: str | None = None, headers: dict[str, str] | None = None
+     ):
+         """Initialize a new WebSocket connector.
+
+         Args:
+             url: The WebSocket URL to connect to.
+             auth_token: Optional authentication token.
+             headers: Optional additional headers.
+         """
+         self.url = url
+         self.auth_token = auth_token
+         self.headers = headers or {}
+         if auth_token:
+             self.headers["Authorization"] = f"Bearer {auth_token}"
+         self.ws: WebSocketClientProtocol | None = None
+         self.pending_requests: dict[str, asyncio.Future] = {}
+
+     async def connect(self) -> None:
+         """Establish a connection to the MCP implementation."""
+         self.ws = await websockets.connect(self.url, extra_headers=self.headers)
+
+         # Start the message receiver task
+         self._receiver_task = asyncio.create_task(self._receive_messages())
+
+     async def _receive_messages(self) -> None:
+         """Continuously receive and process messages from the WebSocket."""
+         if not self.ws:
+             raise RuntimeError("WebSocket is not connected")
+
+         try:
+             async for message in self.ws:
+                 # Parse the message
+                 data = json.loads(message)
+
+                 # Check if this is a response to a pending request
+                 request_id = data.get("id")
+                 if request_id and request_id in self.pending_requests:
+                     future = self.pending_requests.pop(request_id)
+                     if "result" in data:
+                         future.set_result(data["result"])
+                     elif "error" in data:
+                         future.set_exception(Exception(data["error"]))
+         except Exception as e:
+             # If the websocket connection was closed or errored,
+             # reject all pending requests
+             for future in self.pending_requests.values():
+                 if not future.done():
+                     future.set_exception(e)
+
+     async def disconnect(self) -> None:
+         """Close the connection to the MCP implementation."""
+         if self._receiver_task:
+             self._receiver_task.cancel()
+             try:
+                 await self._receiver_task
+             except asyncio.CancelledError:
+                 pass
+
+         if self.ws:
+             await self.ws.close()
+             self.ws = None
+
+         # Reject any pending requests
+         for future in self.pending_requests.values():
+             if not future.done():
+                 future.set_exception(ConnectionError("WebSocket disconnected"))
+         self.pending_requests.clear()
+
+     async def _send_request(self, method: str, params: dict[str, Any] | None = None) -> Any:
+         """Send a request and wait for a response."""
+         if not self.ws:
+             raise RuntimeError("WebSocket is not connected")
+
+         # Create a request ID
+         request_id = str(uuid.uuid4())
+
+         # Create a future to receive the response
+         future = asyncio.Future()
+         self.pending_requests[request_id] = future
+
+         # Send the request
+         await self.ws.send(json.dumps({"id": request_id, "method": method, "params": params or {}}))
+
+         # Wait for the response
+         try:
+             return await future
+         except Exception as e:
+             # Remove the request from pending requests
+             self.pending_requests.pop(request_id, None)
+             raise e
+
+     async def initialize(self) -> dict[str, Any]:
+         """Initialize the MCP session and return session information."""
+         return await self._send_request("initialize")
+
+     async def list_tools(self) -> list[dict[str, Any]]:
+         """List all available tools from the MCP implementation."""
+         result = await self._send_request("tools/list")
+         return result.get("tools", [])
+
+     async def call_tool(self, name: str, arguments: dict[str, Any]) -> Any:
+         """Call an MCP tool with the given arguments."""
+         return await self._send_request("tools/call", {"name": name, "arguments": arguments})
+
+     async def list_resources(self) -> list[dict[str, Any]]:
+         """List all available resources from the MCP implementation."""
+         result = await self._send_request("resources/list")
+         return result
+
+     async def read_resource(self, uri: str) -> tuple[bytes, str]:
+         """Read a resource by URI."""
+         result = await self._send_request("resources/read", {"uri": uri})
+         return result.get("content", b""), result.get("mimeType", "")
+
+     async def request(self, method: str, params: dict[str, Any] | None = None) -> Any:
+         """Send a raw request to the MCP implementation."""
+         return await self._send_request(method, params)
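A minimal, hypothetical usage sketch of the connector above, assuming the file lives at mcp_use/connectors/websocket.py (suggested by its relative import of .base). The URL, token, and "echo" tool name are placeholders; the method calls mirror the class as written. The sketch always calls connect() before disconnect(), since _receiver_task is only created inside connect().

import asyncio

# Assumed import path; the diff does not name this file explicitly.
from mcp_use.connectors.websocket import WebSocketConnector


async def main() -> None:
    # Placeholder URL and token for a local MCP server speaking the JSON protocol above.
    connector = WebSocketConnector("ws://localhost:8080", auth_token="my-token")
    await connector.connect()  # opens the socket and starts the receiver task
    try:
        info = await connector.initialize()   # "initialize" request
        tools = await connector.list_tools()  # "tools/list" request
        print(info, [t.get("name") for t in tools])
        # "echo" is a placeholder tool name; use one returned by list_tools().
        result = await connector.call_tool("echo", {"text": "hello"})
        print(result)
    finally:
        await connector.disconnect()  # cancels the receiver task and closes the socket


asyncio.run(main())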
mcp_use/logging.py ADDED
@@ -0,0 +1,96 @@
+ """
+ Logger module for mcp_use.
+
+ This module provides a centralized logging configuration for the mcp_use library,
+ with customizable log levels and formatters.
+ """
+
+ import logging
+ import os
+ import sys
+
+
+ class Logger:
+     """Centralized logger for mcp_use.
+
+     This class provides logging functionality with configurable levels,
+     formatters, and handlers.
+     """
+
+     # Default log format
+     DEFAULT_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+
+     # Module-specific loggers
+     _loggers = {}
+
+     @classmethod
+     def get_logger(cls, name: str = "mcp_use") -> logging.Logger:
+         """Get a logger instance for the specified name.
+
+         Args:
+             name: Logger name, usually the module name (defaults to 'mcp_use')
+
+         Returns:
+             Configured logger instance
+         """
+         if name in cls._loggers:
+             return cls._loggers[name]
+
+         # Create new logger
+         logger = logging.getLogger(name)
+         cls._loggers[name] = logger
+
+         return logger
+
+     @classmethod
+     def configure(
+         cls,
+         level: int | str = logging.INFO,
+         format_str: str | None = None,
+         log_to_console: bool = True,
+         log_to_file: str | None = None,
+     ) -> None:
+         """Configure the root mcp_use logger.
+
+         Args:
+             level: Log level (default: INFO)
+             format_str: Log format string (default: DEFAULT_FORMAT)
+             log_to_console: Whether to log to console (default: True)
+             log_to_file: Path to log file (default: None)
+         """
+         root_logger = cls.get_logger()
+
+         # Set level
+         if isinstance(level, str):
+             level = getattr(logging, level.upper())
+         root_logger.setLevel(level)
+
+         # Clear existing handlers
+         for handler in root_logger.handlers[:]:
+             root_logger.removeHandler(handler)
+
+         # Set formatter
+         formatter = logging.Formatter(format_str or cls.DEFAULT_FORMAT)
+
+         # Add console handler if requested
+         if log_to_console:
+             console_handler = logging.StreamHandler(sys.stdout)
+             console_handler.setFormatter(formatter)
+             root_logger.addHandler(console_handler)
+
+         # Add file handler if requested
+         if log_to_file:
+             # Ensure directory exists
+             log_dir = os.path.dirname(log_to_file)
+             if log_dir and not os.path.exists(log_dir):
+                 os.makedirs(log_dir)
+
+             file_handler = logging.FileHandler(log_to_file)
+             file_handler.setFormatter(formatter)
+             root_logger.addHandler(file_handler)
+
+
+ # Configure default logger
+ Logger.configure()
+
+ logger = Logger.get_logger()
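A short sketch of how a consumer could reconfigure the logger above; only the APIs defined in this file are used, and the log file path is a placeholder.

from mcp_use.logging import Logger, logger

# Reconfigure the shared "mcp_use" logger: DEBUG level, console plus a file.
# The file path is a placeholder; configure() creates the directory if needed.
Logger.configure(level="debug", log_to_file="logs/mcp_use.log", log_to_console=True)

logger.debug("library-level logger is ready")

# get_logger() caches one logging.Logger per name; child names such as
# "mcp_use.session" propagate to the handlers configured on "mcp_use".
session_logger = Logger.get_logger("mcp_use.session")
session_logger.info("session-level message")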
mcp_use/session.py ADDED
@@ -0,0 +1,168 @@
+ """
+ Session manager for MCP connections.
+
+ This module provides a session manager for MCP connections,
+ which handles authentication, initialization, and tool discovery.
+ """
+
+ from typing import Any
+
+ from .connectors.base import BaseConnector
+ from .tools.converter import ModelProvider, ToolConverter
+
+
+ class MCPSession:
+     """Session manager for MCP connections.
+
+     This class manages the lifecycle of an MCP connection, including
+     authentication, initialization, and tool discovery.
+     """
+
+     def __init__(
+         self,
+         connector: BaseConnector,
+         model_provider: str | ModelProvider,
+         auto_connect: bool = True,
+     ) -> None:
+         """Initialize a new MCP session.
+
+         Args:
+             connector: The connector to use for communicating with the MCP implementation.
+             model_provider: The model provider to convert tools for.
+             auto_connect: Whether to automatically connect to the MCP implementation.
+         """
+         self.connector = connector
+         self.tool_converter = ToolConverter(model_provider)
+         self.session_info: dict[str, Any] | None = None
+         self.tools: list[dict[str, Any]] = []
+         self.auto_connect = auto_connect
+
+     async def __aenter__(self) -> "MCPSession":
+         """Enter the async context manager.
+
+         Returns:
+             The session instance.
+         """
+         await self.connect()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
+         """Exit the async context manager.
+
+         Args:
+             exc_type: The exception type, if an exception was raised.
+             exc_val: The exception value, if an exception was raised.
+             exc_tb: The exception traceback, if an exception was raised.
+         """
+         await self.disconnect()
+
+     async def connect(self) -> None:
+         """Connect to the MCP implementation."""
+         await self.connector.connect()
+
+     async def disconnect(self) -> None:
+         """Disconnect from the MCP implementation."""
+         await self.connector.disconnect()
+
+     async def initialize(self) -> dict[str, Any]:
+         """Initialize the MCP session and discover available tools.
+
+         Returns:
+             The session information returned by the MCP implementation.
+         """
+         # Make sure we're connected
+         if not self.is_connected and self.auto_connect:
+             await self.connect()
+
+         # Initialize the session
+         self.session_info = await self.connector.initialize()
+
+         # Discover available tools
+         await self.discover_tools()
+
+         return self.session_info
+
+     @property
+     def is_connected(self) -> bool:
+         """Check if the connector is connected.
+
+         Returns:
+             True if the connector is connected, False otherwise.
+         """
+         return hasattr(self.connector, "client") and self.connector.client is not None
+
+     async def discover_tools(self) -> list[dict[str, Any]]:
+         """Discover available tools from the MCP implementation.
+
+         Returns:
+             The list of available tools in MCP format.
+         """
+         self.tools = self.connector.tools
+         return self.tools
+
+     def get_tools_for_llm(self) -> list[dict[str, Any]]:
+         """Get the tools in the format required by the LLM.
+
+         Returns:
+             The list of tools in the LLM-specific format.
+         """
+         return self.tool_converter.convert_tools(self.tools)
+
+     async def call_tool(self, name: str, arguments: dict[str, Any]) -> Any:
+         """Call an MCP tool with the given arguments.
+
+         Args:
+             name: The name of the tool to call.
+             arguments: The arguments to pass to the tool.
+
+         Returns:
+             The result of the tool call.
+         """
+         # Make sure we're connected
+         if not self.is_connected and self.auto_connect:
+             await self.connect()
+
+         return await self.connector.call_tool(name, arguments)
+
+     async def process_tool_calls(self, response: dict[str, Any]) -> list[dict[str, Any]]:
+         """Process tool calls from an LLM response.
+
+         This method parses tool calls from the LLM response, executes them,
+         and returns the results.
+
+         Args:
+             response: The response from the LLM.
+
+         Returns:
+             A list of tool call results, each containing 'name', 'arguments', and 'result' keys.
+         """
+         # Parse tool calls from the response
+         tool_calls = self.tool_converter.parse_tool_calls(response)
+
+         # Execute each tool call
+         results = []
+         for tool_call in tool_calls:
+             name = tool_call["name"]
+             arguments = tool_call["arguments"]
+
+             try:
+                 result = await self.call_tool(name, arguments)
+                 results.append(
+                     {
+                         "name": name,
+                         "arguments": arguments,
+                         "result": result,
+                         "error": None,
+                     }
+                 )
+             except Exception as e:
+                 results.append(
+                     {
+                         "name": name,
+                         "arguments": arguments,
+                         "result": None,
+                         "error": str(e),
+                     }
+                 )
+
+         return results
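A sketch of MCPSession wiring a connector to the tool converter. The connector import path and URL are placeholders; the session API (context manager, initialize, get_tools_for_llm, call_tool) comes from the class above. Two details worth noting from the source: discover_tools() reads connector.tools and is_connected checks connector.client, neither of which the WebSocket connector in this diff defines, so both presumably come from BaseConnector, which is not included here.

import asyncio

# Assumed import paths; only mcp_use/session.py is named explicitly in this diff.
from mcp_use.connectors.websocket import WebSocketConnector
from mcp_use.session import MCPSession


async def main() -> None:
    connector = WebSocketConnector("ws://localhost:8080")  # placeholder URL
    async with MCPSession(connector, model_provider="openai") as session:
        info = await session.initialize()        # initialize + discover_tools()
        llm_tools = session.get_tools_for_llm()  # converted via ToolConverter
        print(info, len(llm_tools))

        # Direct tool invocation; "echo" and its arguments are placeholders.
        result = await session.call_tool("echo", {"text": "hi"})
        print(result)


asyncio.run(main())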
mcp_use/tools/__init__.py ADDED
@@ -0,0 +1,11 @@
+ """
+ Tool conversion utilities.
+
+ This module provides utilities for converting between MCP tool schemas
+ and LLM-specific tool formats.
+ """
+
+ from .converter import ToolConverter
+ from .formats import AnthropicToolFormat, OpenAIToolFormat, ToolFormat
+
+ __all__ = ["ToolConverter", "ToolFormat", "OpenAIToolFormat", "AnthropicToolFormat"]
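The re-exports above mean a consumer can import the converter and format classes from the subpackage directly; a one-line sketch (the mcp_use.tools path is inferred from these relative imports):

from mcp_use.tools import AnthropicToolFormat, OpenAIToolFormat, ToolConverter, ToolFormat

converter = ToolConverter("anthropic")  # resolved to ModelProvider.ANTHROPIC by from_string()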
mcp_use/tools/converter.py ADDED
@@ -0,0 +1,108 @@
+ """
+ Tool converter for different LLM providers.
+
+ This module provides utilities for converting between MCP tool schemas
+ and LLM-specific formats.
+ """
+
+ from enum import Enum, auto
+ from typing import Any
+
+ from .formats import AnthropicToolFormat, OpenAIToolFormat, ToolFormat
+
+
+ class ModelProvider(Enum):
+     """Enum for supported model providers."""
+
+     OPENAI = auto()
+     ANTHROPIC = auto()
+
+     @classmethod
+     def from_string(cls, value: str) -> "ModelProvider":
+         """Convert a string to a ModelProvider enum.
+
+         Args:
+             value: The string to convert.
+
+         Returns:
+             The corresponding ModelProvider enum value.
+
+         Raises:
+             ValueError: If the string is not a valid model provider.
+         """
+         value = value.lower()
+         if value in ("openai", "open_ai", "open-ai"):
+             return cls.OPENAI
+         elif value in ("anthropic", "claude"):
+             return cls.ANTHROPIC
+         else:
+             raise ValueError(f"Unsupported model provider: {value}")
+
+
+ class ToolConverter:
+     """Converter for MCP tools to different LLM formats.
+
+     This class provides utilities for converting between MCP tool schemas
+     and LLM-specific formats.
+     """
+
+     _format_classes: dict[ModelProvider, type[ToolFormat]] = {
+         ModelProvider.OPENAI: OpenAIToolFormat,
+         ModelProvider.ANTHROPIC: AnthropicToolFormat,
+     }
+
+     def __init__(self, provider: str | ModelProvider) -> None:
+         """Initialize a new tool converter.
+
+         Args:
+             provider: The model provider to convert tools for.
+                 Can be a string or a ModelProvider enum.
+
+         Raises:
+             ValueError: If the provider is not supported.
+         """
+         if isinstance(provider, str):
+             self.provider = ModelProvider.from_string(provider)
+         else:
+             self.provider = provider
+
+         # Create an instance of the appropriate format class
+         format_class = self._format_classes.get(self.provider)
+         if not format_class:
+             raise ValueError(f"Unsupported model provider: {provider}")
+
+         self._format = format_class()
+
+     def convert_tools(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """Convert a list of MCP tools to the LLM-specific format.
+
+         Args:
+             tools: The list of MCP tools to convert.
+
+         Returns:
+             The converted tools in the LLM-specific format.
+         """
+         return [self._format.convert_tool(tool) for tool in tools]
+
+     def convert_tool_call(self, name: str, arguments: dict[str, Any]) -> dict[str, Any]:
+         """Convert a tool call to the MCP format.
+
+         Args:
+             name: The name of the tool being called.
+             arguments: The arguments for the tool call.
+
+         Returns:
+             The converted tool call in the MCP format.
+         """
+         return self._format.convert_tool_call(name, arguments)
+
+     def parse_tool_calls(self, response: dict[str, Any]) -> list[dict[str, Any]]:
+         """Parse tool calls from an LLM response.
+
+         Args:
+             response: The response from the LLM.
+
+         Returns:
+             A list of parsed tool calls, each containing 'name' and 'arguments' keys.
+         """
+         return self._format.parse_tool_call(response)
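A sketch of the converter in isolation. The tool dict below uses the common MCP name/description/inputSchema shape as an assumption, since the format classes in .formats are not part of this diff and the exact converted output is therefore not shown here.

from mcp_use.tools.converter import ModelProvider, ToolConverter

# Provider strings are normalized by ModelProvider.from_string().
assert ModelProvider.from_string("open-ai") is ModelProvider.OPENAI
assert ModelProvider.from_string("claude") is ModelProvider.ANTHROPIC

converter = ToolConverter("openai")

# Example MCP-style tool description (assumed shape; formats.py is not shown here).
mcp_tool = {
    "name": "echo",
    "description": "Echo a message back to the caller.",
    "inputSchema": {
        "type": "object",
        "properties": {"text": {"type": "string"}},
        "required": ["text"],
    },
}

openai_tools = converter.convert_tools([mcp_tool])  # delegates to OpenAIToolFormat.convert_tool
tool_call = converter.convert_tool_call("echo", {"text": "hi"})
print(openai_tools, tool_call)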