openai-agents 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

This release of openai-agents has been flagged as potentially problematic.

agents/mcp/server.py ADDED
@@ -0,0 +1,301 @@
+ from __future__ import annotations
+
+ import abc
+ import asyncio
+ from contextlib import AbstractAsyncContextManager, AsyncExitStack
+ from pathlib import Path
+ from typing import Any, Literal
+
+ from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+ from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
+ from mcp.client.sse import sse_client
+ from mcp.types import CallToolResult, JSONRPCMessage
+ from typing_extensions import NotRequired, TypedDict
+
+ from ..exceptions import UserError
+ from ..logger import logger
+
+
+ class MCPServer(abc.ABC):
+     """Base class for Model Context Protocol servers."""
+
+     @abc.abstractmethod
+     async def connect(self):
+         """Connect to the server. For example, this might mean spawning a subprocess or
+         opening a network connection. The server is expected to remain connected until
+         `cleanup()` is called.
+         """
+         pass
+
+     @property
+     @abc.abstractmethod
+     def name(self) -> str:
+         """A readable name for the server."""
+         pass
+
+     @abc.abstractmethod
+     async def cleanup(self):
+         """Cleanup the server. For example, this might mean closing a subprocess or
+         closing a network connection.
+         """
+         pass
+
+     @abc.abstractmethod
+     async def list_tools(self) -> list[MCPTool]:
+         """List the tools available on the server."""
+         pass
+
+     @abc.abstractmethod
+     async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult:
+         """Invoke a tool on the server."""
+         pass
+
+
+ class _MCPServerWithClientSession(MCPServer, abc.ABC):
+     """Base class for MCP servers that use a `ClientSession` to communicate with the server."""
+
+     def __init__(self, cache_tools_list: bool):
+         """
+         Args:
+             cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                 cached and only fetched from the server once. If `False`, the tools list will be
+                 fetched from the server on each call to `list_tools()`. The cache can be invalidated
+                 by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
+                 server will not change its tools list, because it can drastically improve latency
+                 (by avoiding a round-trip to the server every time).
+         """
+         self.session: ClientSession | None = None
+         self.exit_stack: AsyncExitStack = AsyncExitStack()
+         self._cleanup_lock: asyncio.Lock = asyncio.Lock()
+         self.cache_tools_list = cache_tools_list
+
+         # The cache is always dirty at startup, so that we fetch tools at least once
+         self._cache_dirty = True
+         self._tools_list: list[MCPTool] | None = None
+
+     @abc.abstractmethod
+     def create_streams(
+         self,
+     ) -> AbstractAsyncContextManager[
+         tuple[
+             MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+             MemoryObjectSendStream[JSONRPCMessage],
+         ]
+     ]:
+         """Create the streams for the server."""
+         pass
+
+     async def __aenter__(self):
+         await self.connect()
+         return self
+
+     async def __aexit__(self, exc_type, exc_value, traceback):
+         await self.cleanup()
+
+     def invalidate_tools_cache(self):
+         """Invalidate the tools cache."""
+         self._cache_dirty = True
+
+     async def connect(self):
+         """Connect to the server."""
+         try:
+             transport = await self.exit_stack.enter_async_context(self.create_streams())
+             read, write = transport
+             session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+             await session.initialize()
+             self.session = session
+         except Exception as e:
+             logger.error(f"Error initializing MCP server: {e}")
+             await self.cleanup()
+             raise
+
+     async def list_tools(self) -> list[MCPTool]:
+         """List the tools available on the server."""
+         if not self.session:
+             raise UserError("Server not initialized. Make sure you call `connect()` first.")
+
+         # Return from cache if caching is enabled, we have tools, and the cache is not dirty
+         if self.cache_tools_list and not self._cache_dirty and self._tools_list:
+             return self._tools_list
+
+         # Reset the cache dirty to False
+         self._cache_dirty = False
+
+         # Fetch the tools from the server
+         self._tools_list = (await self.session.list_tools()).tools
+         return self._tools_list
+
+     async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult:
+         """Invoke a tool on the server."""
+         if not self.session:
+             raise UserError("Server not initialized. Make sure you call `connect()` first.")
+
+         return await self.session.call_tool(tool_name, arguments)
+
+     async def cleanup(self):
+         """Cleanup the server."""
+         async with self._cleanup_lock:
+             try:
+                 await self.exit_stack.aclose()
+                 self.session = None
+             except Exception as e:
+                 logger.error(f"Error cleaning up server: {e}")
+
+
+ class MCPServerStdioParams(TypedDict):
+     """Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another
+     import.
+     """
+
+     command: str
+     """The executable to run to start the server. For example, `python` or `node`."""
+
+     args: NotRequired[list[str]]
+     """Command line args to pass to the `command` executable. For example, `['foo.py']` or
+     `['server.js', '--port', '8080']`."""
+
+     env: NotRequired[dict[str, str]]
+     """The environment variables to set for the server."""
+
+     cwd: NotRequired[str | Path]
+     """The working directory to use when spawning the process."""
+
+     encoding: NotRequired[str]
+     """The text encoding used when sending/receiving messages to the server. Defaults to `utf-8`."""
+
+     encoding_error_handler: NotRequired[Literal["strict", "ignore", "replace"]]
+     """The text encoding error handler. Defaults to `strict`.
+
+     See https://docs.python.org/3/library/codecs.html#codec-base-classes for
+     explanations of possible values.
+     """
+
+
+ class MCPServerStdio(_MCPServerWithClientSession):
+     """MCP server implementation that uses the stdio transport. See the [spec]
+     (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for
+     details.
+     """
+
+     def __init__(
+         self,
+         params: MCPServerStdioParams,
+         cache_tools_list: bool = False,
+         name: str | None = None,
+     ):
+         """Create a new MCP server based on the stdio transport.
+
+         Args:
+             params: The params that configure the server. This includes the command to run to
+                 start the server, the args to pass to the command, the environment variables to
+                 set for the server, the working directory to use when spawning the process, and
+                 the text encoding used when sending/receiving messages to the server.
+             cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                 cached and only fetched from the server once. If `False`, the tools list will be
+                 fetched from the server on each call to `list_tools()`. The cache can be
+                 invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
+                 if you know the server will not change its tools list, because it can drastically
+                 improve latency (by avoiding a round-trip to the server every time).
+             name: A readable name for the server. If not provided, we'll create one from the
+                 command.
+         """
+         super().__init__(cache_tools_list)
+
+         self.params = StdioServerParameters(
+             command=params["command"],
+             args=params.get("args", []),
+             env=params.get("env"),
+             cwd=params.get("cwd"),
+             encoding=params.get("encoding", "utf-8"),
+             encoding_error_handler=params.get("encoding_error_handler", "strict"),
+         )
+
+         self._name = name or f"stdio: {self.params.command}"
+
+     def create_streams(
+         self,
+     ) -> AbstractAsyncContextManager[
+         tuple[
+             MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+             MemoryObjectSendStream[JSONRPCMessage],
+         ]
+     ]:
+         """Create the streams for the server."""
+         return stdio_client(self.params)
+
+     @property
+     def name(self) -> str:
+         """A readable name for the server."""
+         return self._name
+
+
+ class MCPServerSseParams(TypedDict):
+     """Mirrors the params in `mcp.client.sse.sse_client`."""
+
+     url: str
+     """The URL of the server."""
+
+     headers: NotRequired[dict[str, str]]
+     """The headers to send to the server."""
+
+     timeout: NotRequired[float]
+     """The timeout for the HTTP request. Defaults to 5 seconds."""
+
+     sse_read_timeout: NotRequired[float]
+     """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""
+
+
+ class MCPServerSse(_MCPServerWithClientSession):
+     """MCP server implementation that uses the HTTP with SSE transport. See the [spec]
+     (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse)
+     for details.
+     """
+
+     def __init__(
+         self,
+         params: MCPServerSseParams,
+         cache_tools_list: bool = False,
+         name: str | None = None,
+     ):
+         """Create a new MCP server based on the HTTP with SSE transport.
+
+         Args:
+             params: The params that configure the server. This includes the URL of the server,
+                 the headers to send to the server, the timeout for the HTTP request, and the
+                 timeout for the SSE connection.
+
+             cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                 cached and only fetched from the server once. If `False`, the tools list will be
+                 fetched from the server on each call to `list_tools()`. The cache can be
+                 invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
+                 if you know the server will not change its tools list, because it can drastically
+                 improve latency (by avoiding a round-trip to the server every time).
+
+             name: A readable name for the server. If not provided, we'll create one from the
+                 URL.
+         """
+         super().__init__(cache_tools_list)
+
+         self.params = params
+         self._name = name or f"sse: {self.params['url']}"
+
+     def create_streams(
+         self,
+     ) -> AbstractAsyncContextManager[
+         tuple[
+             MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+             MemoryObjectSendStream[JSONRPCMessage],
+         ]
+     ]:
+         """Create the streams for the server."""
+         return sse_client(
+             url=self.params["url"],
+             headers=self.params.get("headers", None),
+             timeout=self.params.get("timeout", 5),
+             sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
+         )
+
+     @property
+     def name(self) -> str:
+         """A readable name for the server."""
+         return self._name
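
Both concrete servers are designed to be used as async context managers: `__aenter__` calls `connect()` and `__aexit__` calls `cleanup()`, so the spawned subprocess or HTTP connection is released even on error. Below is a minimal usage sketch based only on the classes added above; the server command (`python my_mcp_server.py`) and the tool name (`"echo"`) are hypothetical placeholders, not part of this package.

import asyncio

from agents.mcp.server import MCPServerStdio


async def main():
    # cache_tools_list=True skips a server round-trip on repeated list_tools()
    # calls; per the docstring above, only safe if the tool set never changes.
    async with MCPServerStdio(
        params={"command": "python", "args": ["my_mcp_server.py"]},  # hypothetical server
        cache_tools_list=True,
    ) as server:
        tools = await server.list_tools()
        print([tool.name for tool in tools])
        result = await server.call_tool("echo", {"text": "hi"})  # hypothetical tool
        print(result.content)


asyncio.run(main())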
agents/mcp/util.py ADDED
@@ -0,0 +1,115 @@
+ import functools
+ import json
+ from typing import TYPE_CHECKING, Any
+
+ from .. import _debug
+ from ..exceptions import AgentsException, ModelBehaviorError, UserError
+ from ..logger import logger
+ from ..run_context import RunContextWrapper
+ from ..tool import FunctionTool, Tool
+ from ..tracing import FunctionSpanData, get_current_span, mcp_tools_span
+
+ if TYPE_CHECKING:
+     from mcp.types import Tool as MCPTool
+
+     from .server import MCPServer
+
+
+ class MCPUtil:
+     """Set of utilities for interop between MCP and Agents SDK tools."""
+
+     @classmethod
+     async def get_all_function_tools(cls, servers: list["MCPServer"]) -> list[Tool]:
+         """Get all function tools from a list of MCP servers."""
+         tools = []
+         tool_names: set[str] = set()
+         for server in servers:
+             server_tools = await cls.get_function_tools(server)
+             server_tool_names = {tool.name for tool in server_tools}
+             if len(server_tool_names & tool_names) > 0:
+                 raise UserError(
+                     f"Duplicate tool names found across MCP servers: "
+                     f"{server_tool_names & tool_names}"
+                 )
+             tool_names.update(server_tool_names)
+             tools.extend(server_tools)
+
+         return tools
+
+     @classmethod
+     async def get_function_tools(cls, server: "MCPServer") -> list[Tool]:
+         """Get all function tools from a single MCP server."""
+
+         with mcp_tools_span(server=server.name) as span:
+             tools = await server.list_tools()
+             span.span_data.result = [tool.name for tool in tools]
+
+         return [cls.to_function_tool(tool, server) for tool in tools]
+
+     @classmethod
+     def to_function_tool(cls, tool: "MCPTool", server: "MCPServer") -> FunctionTool:
+         """Convert an MCP tool to an Agents SDK function tool."""
+         invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool)
+         return FunctionTool(
+             name=tool.name,
+             description=tool.description or "",
+             params_json_schema=tool.inputSchema,
+             on_invoke_tool=invoke_func,
+             strict_json_schema=False,
+         )
+
+     @classmethod
+     async def invoke_mcp_tool(
+         cls, server: "MCPServer", tool: "MCPTool", context: RunContextWrapper[Any], input_json: str
+     ) -> str:
+         """Invoke an MCP tool and return the result as a string."""
+         try:
+             json_data: dict[str, Any] = json.loads(input_json) if input_json else {}
+         except Exception as e:
+             if _debug.DONT_LOG_TOOL_DATA:
+                 logger.debug(f"Invalid JSON input for tool {tool.name}")
+             else:
+                 logger.debug(f"Invalid JSON input for tool {tool.name}: {input_json}")
+             raise ModelBehaviorError(
+                 f"Invalid JSON input for tool {tool.name}: {input_json}"
+             ) from e
+
+         if _debug.DONT_LOG_TOOL_DATA:
+             logger.debug(f"Invoking MCP tool {tool.name}")
+         else:
+             logger.debug(f"Invoking MCP tool {tool.name} with input {input_json}")
+
+         try:
+             result = await server.call_tool(tool.name, json_data)
+         except Exception as e:
+             logger.error(f"Error invoking MCP tool {tool.name}: {e}")
+             raise AgentsException(f"Error invoking MCP tool {tool.name}: {e}") from e
+
+         if _debug.DONT_LOG_TOOL_DATA:
+             logger.debug(f"MCP tool {tool.name} completed.")
+         else:
+             logger.debug(f"MCP tool {tool.name} returned {result}")
+
+         # The MCP tool result is a list of content items, whereas OpenAI tool outputs are a single
+         # string. We'll try to convert.
+         if len(result.content) == 1:
+             tool_output = result.content[0].model_dump_json()
+         elif len(result.content) > 1:
+             tool_output = json.dumps([item.model_dump() for item in result.content])
+         else:
+             logger.error(f"Errored MCP tool result: {result}")
+             tool_output = "Error running tool."
+
+         current_span = get_current_span()
+         if current_span:
+             if isinstance(current_span.span_data, FunctionSpanData):
+                 current_span.span_data.output = tool_output
+                 current_span.span_data.mcp_data = {
+                     "server": server.name,
+                 }
+             else:
+                 logger.warning(
+                     f"Current span is not a FunctionSpanData, skipping tool output: {current_span}"
+                 )
+
+         return tool_output
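
Taken together, `get_all_function_tools` enforces tool-name uniqueness across servers, and `to_function_tool` binds each MCP tool to `invoke_mcp_tool` via `functools.partial` (note `strict_json_schema=False`, since MCP servers are not guaranteed to emit strict-mode-compatible JSON schemas). A minimal sketch of the conversion path, reusing the hypothetical stdio server from the previous example:

import asyncio

from agents.mcp.server import MCPServerStdio
from agents.mcp.util import MCPUtil


async def main():
    async with MCPServerStdio(
        params={"command": "python", "args": ["my_mcp_server.py"]},  # hypothetical server
    ) as server:
        # Each MCP tool comes back as an Agents SDK FunctionTool whose
        # on_invoke_tool is already bound to this server.
        tools = await MCPUtil.get_all_function_tools([server])
        for tool in tools:
            print(tool.name, tool.params_json_schema)


asyncio.run(main())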
@@ -757,7 +757,7 @@ class _Converter:
              elif isinstance(c, dict) and c.get("type") == "input_file":
                  raise UserError(f"File uploads are not supported for chat completions {c}")
              else:
-                 raise UserError(f"Unknonw content: {c}")
+                 raise UserError(f"Unknown content: {c}")
          return out
 
      @classmethod
@@ -83,7 +83,7 @@ class OpenAIResponsesModel(Model):
          )
 
          if _debug.DONT_LOG_MODEL_DATA:
-             logger.debug("LLM responsed")
+             logger.debug("LLM responded")
          else:
              logger.debug(
                  "LLM resp:\n"
@@ -208,7 +208,11 @@ class OpenAIResponsesModel(Model):
          list_input = ItemHelpers.input_to_new_input_list(input)
 
          parallel_tool_calls = (
-             True if model_settings.parallel_tool_calls and tools and len(tools) > 0 else NOT_GIVEN
+             True
+             if model_settings.parallel_tool_calls and tools and len(tools) > 0
+             else False
+             if model_settings.parallel_tool_calls is False
+             else NOT_GIVEN
          )
          tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
 
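
The rewritten expression is a chained conditional that distinguishes three states rather than two: `True` when parallel tool calls are enabled and tools are present, `False` when the setting is explicitly disabled (in 0.0.6 an explicit `False` was silently collapsed into `NOT_GIVEN`), and `NOT_GIVEN` when the setting is unset, so the API default applies. A standalone sketch of the equivalent logic (`NOT_GIVEN` is the OpenAI SDK sentinel for "omit this field from the request"):

from openai import NOT_GIVEN


def resolve_parallel_tool_calls(parallel_tool_calls, tools):
    # Explicitly enabled, and there are tools that could run in parallel.
    if parallel_tool_calls and tools and len(tools) > 0:
        return True
    # Explicitly disabled: now forwarded to the API instead of being omitted.
    if parallel_tool_calls is False:
        return False
    # Unset (None): omit the field and let the API default apply.
    return NOT_GIVEN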
agents/py.typed ADDED
@@ -0,0 +1 @@
+
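
The empty `py.typed` marker file signals, per PEP 561, that the package ships inline type annotations, so type checkers such as mypy will now read the `agents` package's own types instead of treating it as untyped.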