iflow-mcp_lroolle-agents-mcp-server 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/DONE.mdc +31 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/WIP.mdc +37 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/notes.mdc +78 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/.gitignore +10 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/.python-version +1 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/3287_process.log +14 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/Dockerfile +23 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/PKG-INFO +127 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/README.md +111 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/language.json +1 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/package_name +1 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/push_info.json +5 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/pyproject.toml +60 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/smithery.yaml +31 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/src/agents_mcp_server/__init__.py +9 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/src/agents_mcp_server/__main__.py +31 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/src/agents_mcp_server/cli.py +148 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/src/agents_mcp_server/server.py +389 -0
- iflow_mcp_lroolle_agents_mcp_server-0.1.0/uv.lock +745 -0
+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/DONE.mdc
@@ -0,0 +1,31 @@
+---
+description: DONE
+globs: *
+---
+
+# Completed Tasks
+- [x] Set up the basic project structure
+- [x] Create server.py, cli.py, and __main__.py files
+- [x] Update pyproject.toml with dependencies
+- [x] Create README.md with project description and usage instructions
+- [x] Implement the MCP server using FastMCP
+- [x] Define the server class and configuration
+- [x] Implement OpenAI agents tools integration
+- [x] Add error handling and logging
+- [x] Add CLI for running and installing the server
+- [x] Implement command-line interface
+- [x] Add installation command for Claude desktop app
+- [x] Design and implement agent-based approach
+- [x] Create specialized agents for each tool
+- [x] Create orchestrator agent that can coordinate between specialized agents
+- [x] Update server.py to expose agents via MCP
+- [x] Update README with agent-based documentation
+- [x] Fix tool initialization
+- [x] Implement dynamic creation of the FileSearchTool with client-provided vector_store_ids
+- [x] Create SimpleAsyncComputer implementation for ComputerTool
+- [x] Update documentation with implementation details
+- [x] Enhance AsyncComputer implementation
+- [x] Implement all required abstract methods from AsyncComputer
+- [x] Add state tracking for simulation (cursor position, screen dimensions)
+- [x] Improve command handling with parsing for different command types
+- [x] Add detailed logging/output for simulated actions

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/WIP.mdc
@@ -0,0 +1,37 @@
+---
+description: WIP
+globs: *
+---
+
+# Work In Progress
+- [x] Integrate OpenAI Agents SDK tools
+- [x] Implement WebSearchTool
+- [x] Implement FileSearchTool
+- [x] Implement ComputerTool
+- [x] Design and implement agent-based approach
+- [x] Create specialized agents for each tool
+- [x] Create orchestrator agent that can coordinate between specialized agents
+- [x] Update server.py to expose agents via MCP
+- [x] Update README with agent-based documentation
+- [x] Enhance AsyncComputer implementation
+- [x] Implement all required abstract methods
+- [x] Add state tracking for simulation
+- [x] Improve command handling functionality
+- [x] Add detailed logging/output for debugging
+- [ ] Improve error handling and logging
+- [ ] Add structured logging
+- [ ] Add error reporting
+- [ ] Add tests
+- [ ] Unit tests
+- [ ] Integration tests
+- [x] Set up the basic project structure
+- [x] Create server.py, cli.py, and __main__.py files
+- [x] Update pyproject.toml with dependencies
+- [x] Create README.md with project description and usage instructions
+- [ ] Implement the MCP server using FastMCP
+- [x] Define the server class and configuration
+- [x] Implement OpenAI agents tools integration
+- [ ] Add error handling and logging
+- [ ] Add CLI for running and installing the server
+- [x] Implement command-line interface
+- [x] Add installation command for Claude desktop app

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/.cursor/rules/notes.mdc
@@ -0,0 +1,78 @@
+---
+description: Write my damn notes!
+globs: *
+---
+
+# MCP Server Project Notes
+
+## Project Structure
+```
+.
+├── .cursor
+│   └── rules
+│       ├── DONE.mdc
+│       ├── WIP.mdc
+│       └── notes.mdc
+├── reference
+│   ├── repomix-mcp-python-sdk.xml
+│   └── repomix-openai-agents-python-docs.xml
+├── src
+│   └── agents_mcp_server
+│       ├── __init__.py
+│       ├── __main__.py
+│       ├── cli.py
+│       └── server.py
+├── pyproject.toml
+└── README.md
+```
+
+## Key Technologies
+- MCP (Model Context Protocol): Protocol for connecting AI models with external tools and resources
+- OpenAI Agents SDK: Framework for creating agents that can use tools
+- uv: Package manager for Python (similar to poetry but better)
+
+## Development Guidelines
+- Use `uv add <pkg>` to add dependencies instead of directly modifying pyproject.toml
+- Use `uv run <cmd-name>` to run CLI commands
+- Avoid specifying version numbers when adding dependencies
+
+## OpenAI Agents SDK Patterns
+- [best-practice] Specialized agents are more effective for specific tasks than generalist agents
+- [best-practice] The Agent.as_tool() method lets you convert agents into tools for other agents to use
+- [best-practice] Always use trace() to improve observability of agent execution
+- [design] The "agents-as-tools" pattern allows creating an orchestrator agent that delegates to specialist agents
+- [gotcha] Be careful with agent instructions - they should be very clear and specific to get the best results
+- [gotcha] Each agent instance maintains its own state, so for stateful tools, create a new agent for each request
+- [gotcha] FileSearchTool requires a vector_store_ids parameter during initialization - this can't be null or empty
+- [gotcha] ComputerTool requires a computer parameter (implementing AsyncComputer) during initialization
+- [design] For tools that require client-provided parameters (like FileSearchTool) or implementation details (like ComputerTool), create the agent dynamically at request time rather than pre-initializing it
+- [best-practice] Implement a simple placeholder for required interfaces when real implementation isn't available (e.g., SimpleAsyncComputer)
+
+## AsyncComputer Implementation Insights
+- [implementation] AsyncComputer is an abstract base class requiring methods for browser/desktop interaction
+- [implementation] Required properties: environment (browser/desktop), dimensions (screen size)
+- [implementation] Required methods: screenshot, click, double_click, scroll, type, wait, move, keypress, drag
+- [best-practice] Maintain state in your AsyncComputer implementation (cursor position, current page, etc.)
+- [best-practice] Implement robust command handling for high-level user instructions
+- [best-practice] For testing/development, use proper simulation with meaningful outputs rather than empty implementations
+- [tip] Real-world implementations can use Playwright (browser) or PyAutoGUI/similar (desktop)
+- [design] LocalPlaywrightComputer in the OpenAI docs shows how to implement a real browser-based AsyncComputer
+
+## Agent-Based Design Options
+1. **Multi-capable single agent**: A single agent with access to multiple tools (simpler, but less specialized)
+2. **Specialized agents with orchestrator**: Multiple agents each specialized in using one tool, with an orchestrator agent that delegates tasks (more complex, but more specialized and effective)
+
+## Tool Requirements
+- **WebSearchTool**: No required parameters, but can accept optional `user_location`
+- **FileSearchTool**: Requires `vector_store_ids` (list of strings identifying vector stores)
+- **ComputerTool**: Requires `computer` (an implementation of AsyncComputer interface)
+
+## Implementation Plan
+1. Set up the basic project structure ✅
+2. Implement the MCP server using FastMCP ✅
+3. Implement OpenAI agent tools integration ✅
+4. Implement agent-based approach ✅
+5. Add CLI for running and installing the server ✅
+6. Create documentation and examples ✅
+7. Add tests and improve error handling 🔄
+8. Enhance AsyncComputer implementation ✅

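The notes above describe the "agents-as-tools" pattern and the request-time creation of a FileSearchTool agent only in prose. The following is a minimal sketch of that pattern using the OpenAI Agents SDK; the helper names (`build_orchestrator`, `run_query`) and the example vector store ID are illustrative and are not taken from this package's `server.py`, which is not shown in this diff.

```python
# Sketch of the agents-as-tools pattern, assuming the openai-agents SDK.
# build_orchestrator / run_query are illustrative names, not from server.py.
import asyncio

from agents import Agent, FileSearchTool, Runner, WebSearchTool, trace


def build_orchestrator(vector_store_ids: list[str]) -> Agent:
    """Create specialist agents, then expose them as tools to an orchestrator."""
    web_agent = Agent(
        name="Web Search Agent",
        instructions="Search the web and answer with up-to-date, cited facts.",
        tools=[WebSearchTool()],
    )
    # FileSearchTool needs non-empty vector_store_ids, so this agent is built
    # per request with whatever IDs the client supplies.
    file_agent = Agent(
        name="File Search Agent",
        instructions="Answer questions using documents in the vector store.",
        tools=[FileSearchTool(vector_store_ids=vector_store_ids)],
    )
    return Agent(
        name="Orchestrator Agent",
        instructions=(
            "Decide which specialist tool to call for each request and "
            "combine their answers into a single response."
        ),
        tools=[
            web_agent.as_tool(
                tool_name="web_search",
                tool_description="Search the web for real-time information.",
            ),
            file_agent.as_tool(
                tool_name="file_search",
                tool_description="Search the configured vector stores.",
            ),
        ],
    )


async def run_query(question: str, vector_store_ids: list[str]) -> str:
    # trace() groups the whole multi-agent run for observability.
    with trace("orchestrator-query"):
        result = await Runner.run(build_orchestrator(vector_store_ids), question)
    return result.final_output


if __name__ == "__main__":
    # "vs_example123" is a placeholder vector store ID.
    print(asyncio.run(run_query("What's new in MCP?", ["vs_example123"])))
```

`Runner.run` drives the orchestrator, which chooses per request between the specialists it sees only as tools created by `as_tool()`.
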
+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/.python-version
@@ -0,0 +1 @@
+3.11

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/3287_process.log
@@ -0,0 +1,14 @@
+[2026-02-06] [✅] Step 1 - Get the project: forked and cloned the repository successfully
+[2026-02-06] [✅] Step 2 - Read the code:
+- Project type: MCP server project
+- Language: Python
+- Transport protocols: stdio, sse
+- Framework: FastMCP
+- Entry point: agents_mcp_server:main
+- Tool list: web_search_agent, file_search_agent, computer_action_agent, multi_tool_agent
+[2026-02-06] [✅] Step 3 - Local testing: build and tests succeeded
+- Package name: iflow-mcp_lroolle-agents-mcp-server
+- Entry point: openai-agents-mcp-server
+- Number of tools: 4
+- Environment variable: OPENAI_API_KEY
+[2026-02-06] [✅] Step 4 - Push branch: iflow branch pushed successfully

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/Dockerfile
@@ -0,0 +1,23 @@
+# Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
+FROM python:3.11-slim
+
+# set working directory
+WORKDIR /app
+
+# install build dependencies
+RUN apt-get update && apt-get install -y gcc && rm -rf /var/lib/apt/lists/*
+
+# copy project files
+COPY . /app
+
+# upgrade pip
+RUN pip install --no-cache-dir --upgrade pip
+
+# install project dependencies using hatchling build system
+RUN pip install --no-cache-dir .
+
+# expose port if using SSE transport (optional)
+EXPOSE 5173
+
+# default command
+CMD ["openai-agents-mcp-server"]

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/PKG-INFO
@@ -0,0 +1,127 @@
+Metadata-Version: 2.4
+Name: iflow-mcp_lroolle-agents-mcp-server
+Version: 0.1.0
+Summary: MCP server for OpenAI agents and agents tools.
+Author-email: Eric Wang <wrqatw@gmail.com>
+Requires-Python: >=3.11
+Requires-Dist: mcp
+Requires-Dist: openai
+Requires-Dist: openai-agents
+Requires-Dist: pydantic
+Requires-Dist: requests
+Requires-Dist: rich
+Requires-Dist: typer
+Requires-Dist: uvicorn
+Description-Content-Type: text/markdown
+
+# OpenAI Agents MCP Server
+[](https://smithery.ai/server/@lroolle/openai-agents-mcp-server)
+
+A Model Context Protocol (MCP) server that exposes OpenAI agents through the MCP protocol.
+
+## Features
+
+This server exposes both individual agents and a multi-agent orchestrator using the OpenAI Agents SDK:
+
+### Individual Specialized Agents
+
+- **Web Search Agent**: A specialized agent for searching the web for real-time information
+- **File Search Agent**: A specialized agent for searching and analyzing files in OpenAI's vector store
+- **Computer Action Agent**: A specialized agent for performing actions on your computer safely
+
+### Multi-Agent Orchestrator
+
+- **Orchestrator Agent**: A powerful agent that can coordinate between the specialized agents, choosing the right one(s) for each task
+
+Each agent is accessed through the MCP protocol, making them available to any MCP client, including the Claude desktop app.
+
+## Installation
+
+### Prerequisites
+
+- Python 3.11 or higher
+- [uv](https://github.com/astral-sh/uv) package manager (recommended)
+- OpenAI API key
+
+
+### Installing via Smithery
+
+To install openai-agents-mcp-server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@lroolle/openai-agents-mcp-server):
+
+```bash
+npx -y @smithery/cli install @lroolle/openai-agents-mcp-server --client claude
+```
+
+### Claude Desktop
+
+```
+"mcpServers": {
+  "openai-agents-mcp-server": {
+    "command": "uvx",
+    "args": ["openai-agents-mcp-server"],
+    "env": {
+      "OPENAI_API_KEY": "your-api-key-here"
+    }
+  }
+}
+
+```
+
+
+## Implementation Details
+
+### Tool Requirements
+
+- **WebSearchTool**: No required parameters, but can accept optional location context
+- **FileSearchTool**: Requires vector_store_ids (IDs from your OpenAI vector stores)
+- **ComputerTool**: Requires an AsyncComputer implementation (currently simulated)
+
+### Customization
+
+You can customize this server by:
+
+1. Implementing a full AsyncComputer interface to enable real computer interactions
+2. Adding additional specialized agents for other OpenAI tools
+3. Enhancing the orchestrator agent to handle more complex workflows
+
+## Configuration
+
+You can configure the server using environment variables:
+
+- `OPENAI_API_KEY`: Your OpenAI API key (required)
+- `MCP_TRANSPORT`: Transport protocol to use (default: "stdio", can be "sse")
+
+## Development
+
+### Setup development environment
+
+```bash
+# Clone the repository
+git clone https://github.com/lroolle/openai-agents-mcp-server.git
+cd openai-agents-mcp-server
+
+# Create a virtual environment
+uv venv
+source .venv/bin/activate  # On Windows: .venv\Scripts\activate
+
+# Install dependencies
+uv sync --dev
+```
+
+### Testing with MCP Inspector
+
+You can test the server using the MCP Inspector:
+
+```bash
+# In one terminal, run the server with SSE transport
+export OPENAI_API_KEY=your-api-key
+export MCP_TRANSPORT=sse
+
+uv run mcp dev src/agents_mcp_server/server.py
+```
+
+Then open a web browser and navigate to http://localhost:5173.
+
+## License
+
+MIT

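The README above notes that ComputerTool currently runs against a simulated AsyncComputer. Purely as orientation, here is a minimal sketch of such a simulation against the Agents SDK's AsyncComputer interface; it is an assumption-level illustration, not the package's actual SimpleAsyncComputer (server.py is not included in this diff).

```python
# Sketch of a simulated AsyncComputer, assuming the openai-agents SDK's
# AsyncComputer interface. Not the package's real implementation.
from agents import AsyncComputer, Button, Environment


class SimulatedComputer(AsyncComputer):
    """Tracks cursor/screen state and logs actions instead of performing them."""

    def __init__(self) -> None:
        self.cursor = (0, 0)
        self.log: list[str] = []

    @property
    def environment(self) -> Environment:
        return "browser"

    @property
    def dimensions(self) -> tuple[int, int]:
        return (1280, 800)

    async def screenshot(self) -> str:
        # A real implementation returns a base64-encoded PNG; the simulation
        # returns an empty string as a placeholder.
        return ""

    async def click(self, x: int, y: int, button: Button = "left") -> None:
        self.cursor = (x, y)
        self.log.append(f"click {button} at ({x}, {y})")

    async def double_click(self, x: int, y: int) -> None:
        self.cursor = (x, y)
        self.log.append(f"double_click at ({x}, {y})")

    async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None:
        self.log.append(f"scroll by ({scroll_x}, {scroll_y}) at ({x}, {y})")

    async def type(self, text: str) -> None:
        self.log.append(f"type {text!r}")

    async def wait(self) -> None:
        self.log.append("wait")

    async def move(self, x: int, y: int) -> None:
        self.cursor = (x, y)
        self.log.append(f"move to ({x}, {y})")

    async def keypress(self, keys: list[str]) -> None:
        self.log.append(f"keypress {'+'.join(keys)}")

    async def drag(self, path: list[tuple[int, int]]) -> None:
        if path:
            self.cursor = path[-1]
        self.log.append(f"drag along {len(path)} points")
```

An agent would receive this via `ComputerTool(computer=SimulatedComputer())`; a real implementation would back the same methods with Playwright or PyAutoGUI, as the project notes suggest.
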
+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/README.md
@@ -0,0 +1,111 @@
+# OpenAI Agents MCP Server
+[](https://smithery.ai/server/@lroolle/openai-agents-mcp-server)
+
+A Model Context Protocol (MCP) server that exposes OpenAI agents through the MCP protocol.
+
+## Features
+
+This server exposes both individual agents and a multi-agent orchestrator using the OpenAI Agents SDK:
+
+### Individual Specialized Agents
+
+- **Web Search Agent**: A specialized agent for searching the web for real-time information
+- **File Search Agent**: A specialized agent for searching and analyzing files in OpenAI's vector store
+- **Computer Action Agent**: A specialized agent for performing actions on your computer safely
+
+### Multi-Agent Orchestrator
+
+- **Orchestrator Agent**: A powerful agent that can coordinate between the specialized agents, choosing the right one(s) for each task
+
+Each agent is accessed through the MCP protocol, making them available to any MCP client, including the Claude desktop app.
+
+## Installation
+
+### Prerequisites
+
+- Python 3.11 or higher
+- [uv](https://github.com/astral-sh/uv) package manager (recommended)
+- OpenAI API key
+
+
+### Installing via Smithery
+
+To install openai-agents-mcp-server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@lroolle/openai-agents-mcp-server):
+
+```bash
+npx -y @smithery/cli install @lroolle/openai-agents-mcp-server --client claude
+```
+
+### Claude Desktop
+
+```
+"mcpServers": {
+  "openai-agents-mcp-server": {
+    "command": "uvx",
+    "args": ["openai-agents-mcp-server"],
+    "env": {
+      "OPENAI_API_KEY": "your-api-key-here"
+    }
+  }
+}
+
+```
+
+
+## Implementation Details
+
+### Tool Requirements
+
+- **WebSearchTool**: No required parameters, but can accept optional location context
+- **FileSearchTool**: Requires vector_store_ids (IDs from your OpenAI vector stores)
+- **ComputerTool**: Requires an AsyncComputer implementation (currently simulated)
+
+### Customization
+
+You can customize this server by:
+
+1. Implementing a full AsyncComputer interface to enable real computer interactions
+2. Adding additional specialized agents for other OpenAI tools
+3. Enhancing the orchestrator agent to handle more complex workflows
+
+## Configuration
+
+You can configure the server using environment variables:
+
+- `OPENAI_API_KEY`: Your OpenAI API key (required)
+- `MCP_TRANSPORT`: Transport protocol to use (default: "stdio", can be "sse")
+
+## Development
+
+### Setup development environment
+
+```bash
+# Clone the repository
+git clone https://github.com/lroolle/openai-agents-mcp-server.git
+cd openai-agents-mcp-server
+
+# Create a virtual environment
+uv venv
+source .venv/bin/activate  # On Windows: .venv\Scripts\activate
+
+# Install dependencies
+uv sync --dev
+```
+
+### Testing with MCP Inspector
+
+You can test the server using the MCP Inspector:
+
+```bash
+# In one terminal, run the server with SSE transport
+export OPENAI_API_KEY=your-api-key
+export MCP_TRANSPORT=sse
+
+uv run mcp dev src/agents_mcp_server/server.py
+```
+
+Then open a web browser and navigate to http://localhost:5173.
+
+## License
+
+MIT

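Beyond the Claude Desktop and MCP Inspector instructions in the README above, the published entry point can also be exercised programmatically. A minimal sketch using the `mcp` Python SDK's stdio client follows; the tool names (`web_search_agent`, etc.) come from the build log earlier in this diff, but their input schemas are not shown here, so the sketch only lists them.

```python
# Sketch of connecting to the installed console script over stdio and
# listing the tools it exposes, using the mcp Python SDK client.
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    params = StdioServerParameters(
        command="openai-agents-mcp-server",
        args=[],
        env={"OPENAI_API_KEY": os.environ["OPENAI_API_KEY"]},
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            for tool in tools.tools:
                print(tool.name, tool.inputSchema)
            # Inspect a tool's inputSchema before invoking it, e.g.:
            # await session.call_tool("web_search_agent", arguments={...})


if __name__ == "__main__":
    asyncio.run(main())
```
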
+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/language.json
@@ -0,0 +1 @@
+python

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/package_name
@@ -0,0 +1 @@
+iflow-mcp_lroolle-agents-mcp-server

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/pyproject.toml
@@ -0,0 +1,60 @@
+[project]
+name = "iflow-mcp_lroolle-agents-mcp-server"
+version = "0.1.0"
+description = "MCP server for OpenAI agents and agents tools."
+readme = "README.md"
+authors = [{ name = "Eric Wang", email = "wrqatw@gmail.com" }]
+requires-python = ">=3.11"
+dependencies = [
+    "mcp",
+    "pydantic",
+    "openai",
+    "openai-agents",
+    "typer",
+    "rich",
+    "uvicorn",
+    "requests",
+]
+
+[project.scripts]
+openai-agents-mcp-server = "agents_mcp_server:main"
+openai-agents-mcp-install = "agents_mcp_server.cli:app"
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[dependency-groups]
+dev = ["ipython>=9.0.2"]
+
+
+[tool.isort]
+profile = "black"
+line_length = 100  # Keep same with black.line-length
+multi_line_output = 3
+
+[tool.black]
+line-length = 100  # Too short with default=80
+target-version = ["py39"]
+skip-magic-trailing-comma = true
+include = '\.pyi?$'
+force-exclude = '''
+/(
+    \.git
+  | \.hg
+  | \.mypy_cache
+  | \.pytest_cache
+  | \.tox
+  | \.venv
+  | _build
+  | buck-out
+  | build
+  | dist
+  | migrations
+  | fixture
+  | fixtures
+)/
+'''
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/agents_mcp_server"]

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/smithery.yaml
@@ -0,0 +1,31 @@
+# Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
+
+startCommand:
+  type: stdio
+  configSchema:
+    # JSON Schema defining the configuration options for the MCP.
+    type: object
+    required:
+      - openaiApiKey
+    properties:
+      openaiApiKey:
+        type: string
+        description: Your OpenAI API key.
+      mcpTransport:
+        type: string
+        default: stdio
+        description: Transport protocol to use, either 'stdio' or 'sse'.
+  commandFunction:
+    # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
+    |-
+    (config) => ({
+      command: 'openai-agents-mcp-server',
+      args: [],
+      env: {
+        OPENAI_API_KEY: config.openaiApiKey,
+        MCP_TRANSPORT: config.mcpTransport
+      }
+    })
+exampleConfig:
+  openaiApiKey: sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+  mcpTransport: stdio

+++ iflow_mcp_lroolle_agents_mcp_server-0.1.0/src/agents_mcp_server/__main__.py
@@ -0,0 +1,31 @@
+"""
+Main entry point for the agents-mcp-server.
+
+This module provides the main entry point for running the MCP server.
+"""
+
+import os
+import sys
+
+from .server import mcp
+
+
+def main() -> None:
+    """Run the MCP server."""
+    # Check if the OpenAI API key is set
+    if not os.environ.get("OPENAI_API_KEY"):
+        print("Error: OPENAI_API_KEY environment variable is not set.")
+        print("Please set it before running the server.")
+        sys.exit(1)
+
+    # Get the transport from environment variables or use default
+    transport = os.environ.get("MCP_TRANSPORT", "stdio")
+
+    print(f"Starting OpenAI Agents MCP server with {transport} transport")
+
+    # Run the server using the FastMCP's run method
+    mcp.run(transport=transport)
+
+
+if __name__ == "__main__":
+    main()

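`__main__.py` imports `mcp` from `server.py`, which (+389 lines) is not included in the hunks shown above. Purely for orientation, a FastMCP module of roughly the shape this entry point expects might look like the sketch below; it is not the package's actual server code, and the tool body is an assumption.

```python
# Sketch only: a FastMCP instance exposing one agent-backed tool, assuming
# the mcp SDK's FastMCP and the openai-agents SDK. Not the package's server.py.
from agents import Agent, Runner, WebSearchTool
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("OpenAI Agents")


@mcp.tool()
async def web_search_agent(query: str) -> str:
    """Answer a query with a web-search-capable agent."""
    agent = Agent(
        name="Web Search Agent",
        instructions="Search the web and answer concisely.",
        tools=[WebSearchTool()],
    )
    result = await Runner.run(agent, query)
    return result.final_output
```

With a module like this in place, `mcp.run(transport=transport)` in `__main__.py` serves the registered tools over stdio or SSE.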