iflow-mcp_lroolle-agents-mcp-server 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents_mcp_server/__init__.py +9 -0
- agents_mcp_server/__main__.py +31 -0
- agents_mcp_server/cli.py +148 -0
- agents_mcp_server/server.py +389 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/METADATA +127 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/RECORD +8 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/WHEEL +4 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/entry_points.txt +3 -0
agents_mcp_server/__main__.py
ADDED
@@ -0,0 +1,31 @@
"""
Main entry point for the agents-mcp-server.

This module provides the main entry point for running the MCP server.
"""

import os
import sys

from .server import mcp


def main() -> None:
    """Run the MCP server."""
    # Check if the OpenAI API key is set
    if not os.environ.get("OPENAI_API_KEY"):
        print("Error: OPENAI_API_KEY environment variable is not set.")
        print("Please set it before running the server.")
        sys.exit(1)

    # Get the transport from environment variables or use the default
    transport = os.environ.get("MCP_TRANSPORT", "stdio")

    print(f"Starting OpenAI Agents MCP server with {transport} transport")

    # Run the server using FastMCP's run method
    mcp.run(transport=transport)


if __name__ == "__main__":
    main()
agents_mcp_server/cli.py
ADDED
@@ -0,0 +1,148 @@
"""
Command-line interface for the agents-mcp-server.

This module provides a command-line interface for running and installing the MCP server.
"""

import getpass
import json
import os
import platform
import sys
from pathlib import Path
from typing import Dict, List, Optional

import typer
from openai import OpenAI
from rich.console import Console

app = typer.Typer(help="OpenAI Agents MCP Server CLI")
console = Console()


def which(cmd: str, path: Optional[str] = None) -> Optional[str]:
    """Find the path to an executable."""
    if path is None:
        path = os.environ.get("PATH", "")

    if platform.system() == "Windows":
        cmd = cmd + ".exe"
        paths = path.split(";")
    else:
        paths = path.split(":")

    for p in paths:
        cmd_path = os.path.join(p, cmd)
        if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):
            return cmd_path

    return None


def update_claude_config(
    server_name: str, command: str, args: List[str], env_vars: Optional[Dict[str, str]] = None
) -> bool:
    """Update the Claude desktop app configuration to include this server."""
    # Find the Claude config file
    config_file = None
    if platform.system() == "Darwin":  # macOS
        config_file = Path(
            Path.home(), "Library", "Application Support", "Claude", "servers_config.json"
        )
    elif platform.system() == "Windows":
        config_file = Path(Path.home(), "AppData", "Roaming", "Claude", "servers_config.json")
    elif platform.system() == "Linux":
        config_file = Path(Path.home(), ".config", "Claude", "servers_config.json")

    if not config_file:
        console.print("[bold red]Error:[/] Could not determine Claude config file location.")
        return False

    # Create the config file if it doesn't exist
    config_file.parent.mkdir(parents=True, exist_ok=True)
    if not config_file.exists():
        config_file.write_text("{}")

    try:
        config = json.loads(config_file.read_text())
        if "mcpServers" not in config:
            config["mcpServers"] = {}

        # Always preserve existing env vars and merge with new ones
        if server_name in config["mcpServers"] and "env" in config["mcpServers"][server_name]:
            existing_env = config["mcpServers"][server_name]["env"]
            if env_vars:
                # New vars take precedence over existing ones
                env_vars = {**existing_env, **env_vars}
            else:
                env_vars = existing_env

        server_config = {"command": command, "args": args}

        # Add environment variables if specified
        if env_vars:
            server_config["env"] = env_vars

        config["mcpServers"][server_name] = server_config

        config_file.write_text(json.dumps(config, indent=2))
        console.print(f"[bold green]Success:[/] Added server '{server_name}' to Claude config")
        return True
    except Exception as e:
        console.print(f"[bold red]Error:[/] Failed to update Claude config: {str(e)}")
        return False


@app.command()
def run() -> None:
    """Run the OpenAI Agents Tool MCP server."""
    from . import main

    main()


@app.command()
def install() -> None:
    """Install the server in the Claude desktop app."""
    name = "openai-agents-mcp"

    env_dict = {}
    local_bin = Path(Path.home(), ".local", "bin")
    pyenv_shims = Path(Path.home(), ".pyenv", "shims")
    path = os.environ["PATH"]
    python_version = platform.python_version()
    python_bin = Path(Path.home(), "Library", "Python", python_version, "bin")

    if platform.system() == "Windows":
        env_dict["PATH"] = f"{local_bin};{pyenv_shims};{python_bin};{path}"
    else:
        env_dict["PATH"] = f"{local_bin}:{pyenv_shims}:{python_bin}:{path}"

    api_key = os.environ.get("OPENAI_API_KEY", "")
    while not api_key:
        api_key = getpass.getpass("Enter your OpenAI API key: ")
        if api_key:
            client = OpenAI(api_key=api_key)
            try:
                client.models.list()
            except Exception as e:
                console.print(
                    f"[bold red]Error:[/] Failed to authenticate with OpenAI API: {str(e)}"
                )
                api_key = ""

    env_dict["OPENAI_API_KEY"] = api_key

    uv = which("uvx", path=env_dict["PATH"])
    command = uv if uv else "uvx"
    args = [name]

    if update_claude_config(name, command, args, env_vars=env_dict):
        console.print(f"[bold green]Success:[/] Successfully installed {name} in Claude app")
    else:
        console.print(f"[bold red]Error:[/] Failed to install {name} in Claude app")
        sys.exit(1)


if __name__ == "__main__":
    app()
agents_mcp_server/server.py
ADDED
@@ -0,0 +1,389 @@
"""
MCP server for OpenAI agents tools.

This module provides a FastMCP server that exposes OpenAI agents through the Model Context Protocol.
"""

import asyncio
import base64
from typing import Any, Dict, List, Literal, Optional

from agents import Agent, AsyncComputer, ComputerTool, FileSearchTool, Runner, WebSearchTool, trace
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field


class SimpleAsyncComputer(AsyncComputer):
    """
    A simple implementation of the AsyncComputer interface that simulates computer actions.

    In a real implementation, you would use a browser automation library like Playwright
    or a system automation tool to actually perform these actions on the computer.
    """

    def __init__(self):
        """Initialize the SimpleAsyncComputer."""
        self._screen_width = 1024
        self._screen_height = 768
        self._cursor_x = 0
        self._cursor_y = 0
        self._current_page = "https://bing.com"

    @property
    def environment(self) -> Literal["browser", "desktop"]:
        """Return the environment type of this computer."""
        return "browser"

    @property
    def dimensions(self) -> tuple[int, int]:
        """Return the dimensions of the screen."""
        return (self._screen_width, self._screen_height)

    async def screenshot(self) -> str:
        """
        Capture a screenshot and return it as a base64-encoded string.

        In a real implementation, this would capture an actual screenshot.
        """
        placeholder_png = base64.b64decode(
            "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="
        )
        return base64.b64encode(placeholder_png).decode("utf-8")

    async def click(
        self, x: int, y: int, button: Literal["left", "middle", "right"] = "left"
    ) -> None:
        """Simulate clicking at the specified coordinates."""
        self._cursor_x = x
        self._cursor_y = y
        print(f"Simulated {button} click at ({x}, {y})")

    async def double_click(self, x: int, y: int) -> None:
        """Simulate double-clicking at the specified coordinates."""
        self._cursor_x = x
        self._cursor_y = y
        print(f"Simulated double click at ({x}, {y})")

    async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None:
        """Simulate scrolling from the specified position."""
        self._cursor_x = x
        self._cursor_y = y
        print(f"Simulated scroll at ({x}, {y}) by ({scroll_x}, {scroll_y})")

    async def type(self, text: str) -> None:
        """Simulate typing the specified text."""
        print(f"Simulated typing: {text}")

    async def wait(self) -> None:
        """Simulate waiting for a short period."""
        await asyncio.sleep(1)
        print("Waited for 1 second")

    async def move(self, x: int, y: int) -> None:
        """Simulate moving the cursor to the specified coordinates."""
        self._cursor_x = x
        self._cursor_y = y
        print(f"Moved cursor to ({x}, {y})")

    async def keypress(self, keys: list[str]) -> None:
        """Simulate pressing the specified keys."""
        print(f"Simulated keypress: {', '.join(keys)}")

    async def drag(self, path: list[tuple[int, int]]) -> None:
        """Simulate dragging the cursor along the specified path."""
        if not path:
            return

        self._cursor_x = path[0][0]
        self._cursor_y = path[0][1]
        print(f"Started drag at ({self._cursor_x}, {self._cursor_y})")

        for x, y in path[1:]:
            self._cursor_x = x
            self._cursor_y = y

        print(f"Ended drag at ({self._cursor_x}, {self._cursor_y})")

    async def run_command(self, command: str) -> str:
        """
        Simulate running a command and return the output.

        In a real implementation, this could execute shell commands
        or perform actions based on high-level instructions.
        """
        print(f"Simulating command: {command}")

        if command.startswith("open "):
            app = command[5:].strip()
            return f"Opened {app}"
        elif command.startswith("search "):
            query = command[7:].strip()
            self._current_page = f"https://bing.com/search?q={query}"
            return f"Searched for '{query}'"
        elif command.startswith("navigate "):
            url = command[9:].strip()
            self._current_page = url
            return f"Navigated to {url}"
        else:
            return f"Executed: {command}"

    async def get_screenshot(self) -> bytes:
        """Get a screenshot of the current screen as raw bytes."""
        return base64.b64decode(
            "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="
        )


mcp = FastMCP(
    name="OpenAI Agents",
    instructions="This MCP server provides access to OpenAI agents through the Model Context Protocol.",
)


class AgentResponse(BaseModel):
    """Response from an OpenAI agent."""

    response: str = Field(..., description="The response from the agent")
    raw_response: Optional[Dict[str, Any]] = Field(
        None, description="The raw response data from the agent, if available"
    )


web_search_agent = Agent(
    name="Web Search Assistant",
    instructions="""You are a web search assistant. Your primary goal is to search the web for accurate,
up-to-date information based on the user's query.

Guidelines:
1. Always use the web search tool to find information
2. Cite your sources when providing information
3. If information seems outdated or contradictory, acknowledge this
4. Summarize information in a clear, concise manner
5. For complex topics, break down information into digestible parts
""",
    tools=[WebSearchTool()],
)

file_search_instructions = """You are a file search assistant. Your primary goal is to search through files and documents
to find relevant information based on the user's query.

Guidelines:
1. Always use the file search tool to find documents
2. Quote relevant sections from documents when appropriate
3. Summarize document content clearly
4. If multiple documents are found, compare and contrast their information
5. If no relevant documents are found, clearly state this
"""

computer_action_instructions = """You are a computer action assistant. Your primary goal is to help users perform
actions on their computer safely and effectively.

Guidelines:
1. Always use the computer tool to perform actions
2. Prioritize safety and security in all actions
3. Verify user intentions before performing potentially destructive actions
4. Provide clear feedback about actions taken
5. If an action cannot be performed, explain why and suggest alternatives
"""


@mcp.tool(
    name="web_search_agent",
    description="Use an AI agent specialized in web searching to find accurate, up-to-date information from the internet.",
)
async def web_search(
    query: str = Field(
        ..., description="The search query or question you want to find information about online."
    ),
    location: Optional[str] = Field(
        None,
        description="Optional location context for location-specific searches (e.g., 'New York').",
    ),
) -> AgentResponse:
    """Use a specialized web search agent powered by OpenAI to find information on the internet."""
    try:
        agent = web_search_agent
        if location:
            agent = Agent(
                name="Web Search Assistant",
                instructions=web_search_agent.instructions,
                tools=[WebSearchTool(user_location={"type": "approximate", "city": location})],
            )

        with trace("Web search agent execution"):
            result = await Runner.run(agent, query)

        return AgentResponse(
            response=result.final_output,
            raw_response={"items": [str(item) for item in result.new_items]},
        )

    except Exception as e:
        print(f"Error running web search agent: {e}")
        return AgentResponse(
            response=f"An error occurred while searching the web: {str(e)}", raw_response=None
        )


@mcp.tool(
    name="file_search_agent",
    description="Use an AI agent specialized in searching through files and documents to find relevant information.",
)
async def file_search(
    query: str = Field(..., description="The search query or question to find in the documents."),
    vector_store_ids: List[str] = Field(
        ...,
        description="The IDs of the vector stores to search in. This is required for file search to work.",
    ),
    max_results: int = Field(5, description="The maximum number of document results to return."),
) -> AgentResponse:
    """Use a specialized file search agent powered by OpenAI to find information in documents."""
    try:
        agent = Agent(
            name="File Search Assistant",
            instructions=file_search_instructions,
            tools=[FileSearchTool(max_num_results=max_results, vector_store_ids=vector_store_ids)],
        )

        with trace("File search agent execution"):
            result = await Runner.run(agent, query)

        return AgentResponse(
            response=result.final_output,
            raw_response={"items": [str(item) for item in result.new_items]},
        )

    except Exception as e:
        print(f"Error running file search agent: {e}")
        return AgentResponse(
            response=f"An error occurred while searching files: {str(e)}", raw_response=None
        )


@mcp.tool(
    name="computer_action_agent",
    description="Use an AI agent specialized in performing computer actions safely and effectively.",
)
async def computer_action(
    action: str = Field(..., description="The action or task you want to perform on the computer.")
) -> AgentResponse:
    """Use a specialized computer action agent powered by OpenAI to perform actions on the computer."""
    try:
        computer = SimpleAsyncComputer()

        agent = Agent(
            name="Computer Action Assistant",
            instructions=computer_action_instructions,
            tools=[ComputerTool(computer=computer)],
        )

        with trace("Computer action agent execution"):
            result = await Runner.run(agent, action)

        return AgentResponse(
            response=result.final_output,
            raw_response={"items": [str(item) for item in result.new_items]},
        )

    except Exception as e:
        print(f"Error running computer action agent: {e}")
        return AgentResponse(
            response=f"An error occurred while performing the computer action: {str(e)}",
            raw_response=None,
        )


@mcp.tool(
    name="multi_tool_agent",
    description="Use an AI agent that can orchestrate between web search, file search, and computer actions based on your query.",
)
async def orchestrator_agent(
    query: str = Field(..., description="The query or task you want help with."),
    enable_web_search: bool = Field(True, description="Whether to enable web search capabilities."),
    enable_file_search: bool = Field(
        False, description="Whether to enable file search capabilities."
    ),
    enable_computer_actions: bool = Field(
        True, description="Whether to enable computer action capabilities."
    ),
    vector_store_ids: Optional[List[str]] = Field(
        None,
        description="Required if enable_file_search is True. The IDs of the vector stores to search in.",
    ),
) -> AgentResponse:
    """Use a specialized orchestrator agent that can delegate to the most appropriate specialized agent."""
    try:
        tools = []

        if enable_web_search:
            tools.append(
                web_search_agent.as_tool(
                    tool_name="search_web", tool_description="Search the web for information"
                )
            )

        if enable_file_search:
            if not vector_store_ids:
                return AgentResponse(
                    response="Error: vector_store_ids is required when file search is enabled.",
                    raw_response=None,
                )

            file_search_agent = Agent(
                name="File Search Assistant",
                instructions=file_search_instructions,
                tools=[FileSearchTool(max_num_results=5, vector_store_ids=vector_store_ids)],
            )

            tools.append(
                file_search_agent.as_tool(
                    tool_name="search_files",
                    tool_description="Search for information in files and documents",
                )
            )

        if enable_computer_actions:
            computer = SimpleAsyncComputer()

            computer_action_agent = Agent(
                name="Computer Action Assistant",
                instructions=computer_action_instructions,
                tools=[ComputerTool(computer=computer)],
            )

            tools.append(
                computer_action_agent.as_tool(
                    tool_name="perform_computer_action",
                    tool_description="Perform an action on the computer",
                )
            )

        orchestrator = Agent(
            name="Multi-Tool Orchestrator",
            instructions="""You are an intelligent orchestrator with access to specialized agents.
Based on the user's query, determine which specialized agent(s) can best help and
delegate the task to them.

Guidelines:
1. For queries about current events or facts, use the web search agent
2. For queries about documents or specific files, use the file search agent
3. For requests to perform actions on the computer, use the computer action agent
4. For complex requests, you can use multiple agents in sequence
5. Always explain your reasoning and which agent(s) you're using
""",
            tools=tools,
        )

        with trace("Orchestrator agent execution"):
            result = await Runner.run(orchestrator, query)

        return AgentResponse(
            response=result.final_output,
            raw_response={"items": [str(item) for item in result.new_items]},
        )

    except Exception as e:
        print(f"Error running orchestrator agent: {e}")
        return AgentResponse(
            response=f"An error occurred while processing your request: {str(e)}", raw_response=None
        )
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,127 @@
Metadata-Version: 2.4
Name: iflow-mcp_lroolle-agents-mcp-server
Version: 0.1.0
Summary: MCP server for OpenAI agents and agents tools.
Author-email: Eric Wang <wrqatw@gmail.com>
Requires-Python: >=3.11
Requires-Dist: mcp
Requires-Dist: openai
Requires-Dist: openai-agents
Requires-Dist: pydantic
Requires-Dist: requests
Requires-Dist: rich
Requires-Dist: typer
Requires-Dist: uvicorn
Description-Content-Type: text/markdown

# OpenAI Agents MCP Server
[](https://smithery.ai/server/@lroolle/openai-agents-mcp-server)

A Model Context Protocol (MCP) server that exposes OpenAI agents through the MCP protocol.

## Features

This server exposes both individual agents and a multi-agent orchestrator using the OpenAI Agents SDK:

### Individual Specialized Agents

- **Web Search Agent**: A specialized agent for searching the web for real-time information
- **File Search Agent**: A specialized agent for searching and analyzing files in OpenAI's vector store
- **Computer Action Agent**: A specialized agent for performing actions on your computer safely

### Multi-Agent Orchestrator

- **Orchestrator Agent**: A powerful agent that can coordinate between the specialized agents, choosing the right one(s) for each task

Each agent is accessed through the MCP protocol, making them available to any MCP client, including the Claude desktop app.

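As a rough illustration (not part of the packaged README), the sketch below connects from a plain MCP client using the Python SDK's stdio transport, lists the exposed tools, and calls `web_search_agent`. The launch command mirrors the Claude Desktop configuration further down; the command, environment, and query are assumptions, not part of this package.

```python
# Illustrative sketch: connect over stdio, list the exposed tools, call one of them.
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Launch command assumed to match the Claude Desktop config shown below.
    params = StdioServerParameters(
        command="uvx",
        args=["openai-agents-mcp-server"],
        env={"OPENAI_API_KEY": "your-api-key-here"},
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            tools = await session.list_tools()
            # Expected: web_search_agent, file_search_agent,
            # computer_action_agent, multi_tool_agent
            print([tool.name for tool in tools.tools])

            result = await session.call_tool(
                "web_search_agent", arguments={"query": "What's new in MCP?"}
            )
            print(result.content)


if __name__ == "__main__":
    asyncio.run(main())
```
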
## Installation

### Prerequisites

- Python 3.11 or higher
- [uv](https://github.com/astral-sh/uv) package manager (recommended)
- OpenAI API key


### Installing via Smithery

To install openai-agents-mcp-server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@lroolle/openai-agents-mcp-server):

```bash
npx -y @smithery/cli install @lroolle/openai-agents-mcp-server --client claude
```

### Claude Desktop

```
"mcpServers": {
  "openai-agents-mcp-server": {
    "command": "uvx",
    "args": ["openai-agents-mcp-server"],
    "env": {
      "OPENAI_API_KEY": "your-api-key-here"
    }
  }
}
```


## Implementation Details

### Tool Requirements

- **WebSearchTool**: No required parameters, but can accept optional location context
- **FileSearchTool**: Requires vector_store_ids (IDs from your OpenAI vector stores)
- **ComputerTool**: Requires an AsyncComputer implementation (currently simulated)

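To make the FileSearchTool requirement concrete, here is a small hedged sketch of calling the `file_search_agent` tool through an already-initialized MCP `ClientSession` (as produced in the connection sketch above); the vector store ID is a placeholder you would replace with one of your own.

```python
# Illustrative sketch: file_search_agent only works when vector_store_ids
# points at existing OpenAI vector stores.
from mcp import ClientSession


async def search_files(session: ClientSession, query: str) -> str:
    result = await session.call_tool(
        "file_search_agent",
        arguments={
            "query": query,
            "vector_store_ids": ["vs_your_vector_store_id"],  # placeholder ID
            "max_results": 5,
        },
    )
    # The AgentResponse comes back serialized as tool result content.
    return result.content[0].text
```
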
### Customization

You can customize this server by:

1. Implementing a full AsyncComputer interface to enable real computer interactions (see the sketch after this list)
2. Adding additional specialized agents for other OpenAI tools
3. Enhancing the orchestrator agent to handle more complex workflows

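For the first customization point, here is a rough sketch of what a real `AsyncComputer` might look like, backed by Playwright. This is an assumption for illustration only; the package itself ships only the simulated `SimpleAsyncComputer`, and method names and signatures simply mirror the interface used in `server.py`. Error handling and teardown are omitted.

```python
# Illustrative sketch, not shipped with this package: a Playwright-backed
# AsyncComputer that performs real browser actions instead of printing.
import asyncio
import base64
from typing import Literal

from agents import AsyncComputer
from playwright.async_api import Browser, Page, async_playwright


class PlaywrightComputer(AsyncComputer):
    def __init__(self) -> None:
        self._browser: Browser | None = None
        self._page: Page | None = None

    async def start(self) -> None:
        # Call once before handing this computer to ComputerTool.
        playwright = await async_playwright().start()
        self._browser = await playwright.chromium.launch(headless=True)
        self._page = await self._browser.new_page(
            viewport={"width": 1024, "height": 768}
        )

    @property
    def environment(self) -> Literal["browser", "desktop"]:
        return "browser"

    @property
    def dimensions(self) -> tuple[int, int]:
        return (1024, 768)

    async def screenshot(self) -> str:
        return base64.b64encode(await self._page.screenshot()).decode("utf-8")

    async def click(
        self, x: int, y: int, button: Literal["left", "middle", "right"] = "left"
    ) -> None:
        await self._page.mouse.click(x, y, button=button)

    async def double_click(self, x: int, y: int) -> None:
        await self._page.mouse.dblclick(x, y)

    async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None:
        await self._page.mouse.move(x, y)
        await self._page.mouse.wheel(scroll_x, scroll_y)

    async def type(self, text: str) -> None:
        await self._page.keyboard.type(text)

    async def wait(self) -> None:
        await asyncio.sleep(1)

    async def move(self, x: int, y: int) -> None:
        await self._page.mouse.move(x, y)

    async def keypress(self, keys: list[str]) -> None:
        for key in keys:
            # Key names may need mapping to Playwright's expected values.
            await self._page.keyboard.press(key)

    async def drag(self, path: list[tuple[int, int]]) -> None:
        if not path:
            return
        await self._page.mouse.move(*path[0])
        await self._page.mouse.down()
        for x, y in path[1:]:
            await self._page.mouse.move(x, y)
        await self._page.mouse.up()
```
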
## Configuration

You can configure the server using environment variables:

- `OPENAI_API_KEY`: Your OpenAI API key (required)
- `MCP_TRANSPORT`: Transport protocol to use (default: "stdio", can be "sse")

## Development

### Setup development environment

```bash
# Clone the repository
git clone https://github.com/lroolle/openai-agents-mcp-server.git
cd openai-agents-mcp-server

# Create a virtual environment
uv venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate

# Install dependencies
uv sync --dev
```

### Testing with MCP Inspector

You can test the server using the MCP Inspector:

```bash
# In one terminal, run the server with SSE transport
export OPENAI_API_KEY=your-api-key
export MCP_TRANSPORT=sse

uv run mcp dev src/agents_mcp_server/server.py
```

Then open a web browser and navigate to http://localhost:5173.

## License

MIT
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,8 @@
agents_mcp_server/__init__.py,sha256=fxxEeKaLlkvWLFrVEKGw6MAlTOJQjw4DdQpJFHXt9tM,165
agents_mcp_server/__main__.py,sha256=OcRp9ey2fSsVoXgkGYqBj_CFhfstJI3kfI6L1zoFf0k,784
agents_mcp_server/cli.py,sha256=lH-ER6sZ-V9dtsm11rTdmTFxVnwsX76ZWYUBGHZrpsk,4715
agents_mcp_server/server.py,sha256=cpbtiDCDpOnx-6k3LilHcZpcWu-C0_v5G-bPrGHT_z4,14503
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/METADATA,sha256=V_PXmoAdNhSqK6tLGPl3_juQ0mOyC0NwPpVejqQq3kg,3498
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/entry_points.txt,sha256=u8TGPJRsmT0z9CL3PCxEYCcjEvFtLET9gOqxZ5jifWg,122
iflow_mcp_lroolle_agents_mcp_server-0.1.0.dist-info/RECORD,,