mcp-server-mas-sequential-thinking 0.1.3__tar.gz → 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_server_mas_sequential_thinking-0.2.0/.env.example +27 -0
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/PKG-INFO +42 -12
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/README.md +40 -11
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/README.zh-CN.md +41 -68
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/main.py +111 -42
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/pyproject.toml +2 -9
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/uv.lock +169 -166
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/.gitignore +0 -0
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/.python-version +0 -0
mcp_server_mas_sequential_thinking-0.2.0/.env.example ADDED
@@ -0,0 +1,27 @@
+# --- LLM Configuration ---
+# Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+LLM_PROVIDER="deepseek"
+
+# Provide the API key for the chosen provider:
+# GROQ_API_KEY="your_groq_api_key"
+DEEPSEEK_API_KEY="your_deepseek_api_key"
+# OPENROUTER_API_KEY="your_openrouter_api_key"
+
+# Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+# Optional: Specify different models for Team Coordinator and Specialist Agents
+# Defaults are set within the code based on the provider if these are not set.
+# Example for Groq:
+# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+# Example for DeepSeek:
+# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+# DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+# Example for OpenRouter:
+# OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+# OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+# --- External Tools ---
+# Required ONLY if the Researcher agent is used and needs Exa
+EXA_API_KEY="your_exa_api_key"
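For orientation while reading the rest of this diff: these variables are consumed by the new `get_model_config()` helper that this release adds to `main.py` (see that file's hunks below). A minimal sketch of the resolution order, compressed into a table-driven form for brevity (the shipped code uses explicit `if`/`elif` branches); the defaults are the ones visible in the `main.py` diff:

```python
import os

# Provider selection mirrors main.py's get_model_config() (see diff below).
provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()

# Per-provider (team, agent) model-ID defaults, as shown in the main.py hunks.
DEFAULTS = {
    "deepseek": ("deepseek-chat", "deepseek-chat"),
    "groq": ("deepseek-r1-distill-llama-70b", "deepseek-r1-distill-llama-70b"),
    "openrouter": ("deepseek/deepseek-chat-v3-0324", "deepseek/deepseek-chat-v3-0324"),
}
team_default, agent_default = DEFAULTS.get(provider, DEFAULTS["deepseek"])

# *_TEAM_MODEL_ID / *_AGENT_MODEL_ID environment variables override the defaults.
prefix = provider.upper()
team_model_id = os.environ.get(f"{prefix}_TEAM_MODEL_ID", team_default)
agent_model_id = os.environ.get(f"{prefix}_AGENT_MODEL_ID", agent_default)

print(f"{provider}: team={team_model_id}, agent={agent_model_id}")
```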
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/PKG-INFO RENAMED
@@ -1,12 +1,13 @@
 Metadata-Version: 2.4
 Name: mcp-server-mas-sequential-thinking
-Version: 0.1.3
+Version: 0.2.0
 Summary: MCP Agent Implementation for Sequential Thinking
 Author-email: Frad LEE <fradser@gmail.com>
 Requires-Python: >=3.10
 Requires-Dist: agno
 Requires-Dist: asyncio
 Requires-Dist: exa-py
+Requires-Dist: groq
 Requires-Dist: mcp
 Requires-Dist: python-dotenv
 Provides-Extra: dev
@@ -80,8 +81,11 @@ This parallel processing leads to substantially higher token usage (potentially
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno
-*
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+    * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API Key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -90,7 +94,9 @@ This parallel processing leads to substantially higher token usage (potentially
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
-
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
+```json
 {
   "mcpServers": {
     "mas-sequential-thinking": {
@@ -98,14 +104,17 @@ This server runs as a standard executable script that communicates via stdio, as
       "args": [
         "mcp-server-mas-sequential-thinking"
      ],
-      env": {
-        "
-        "
-        "
+      "env": {
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
      }
    }
  }
-}
+}
 ```
 
 ## Installation & Setup
@@ -119,10 +128,31 @@ This server runs as a standard executable script that communicates via stdio, as
 2. **Set Environment Variables:**
    Create a `.env` file in the root directory or export the variables:
    ```dotenv
-   #
-
-
+   # --- LLM Configuration ---
+   # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+   LLM_PROVIDER="deepseek"
 
+   # Provide the API key for the chosen provider:
+   # GROQ_API_KEY="your_groq_api_key"
+   DEEPSEEK_API_KEY="your_deepseek_api_key"
+   # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+   # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+   DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+   # Optional: Specify different models for Team Coordinator and Specialist Agents
+   # Defaults are set within the code based on the provider if these are not set.
+   # Example for Groq:
+   # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+   # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+   # Example for DeepSeek:
+   # DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+   # DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+   # Example for OpenRouter:
+   # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+   # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+   # --- External Tools ---
    # Required ONLY if the Researcher agent is used and needs Exa
    EXA_API_KEY="your_exa_api_key"
    ```
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/README.md RENAMED
@@ -62,8 +62,11 @@ This parallel processing leads to substantially higher token usage (potentially
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno
-*
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+    * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API Key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -72,7 +75,9 @@ This parallel processing leads to substantially higher token usage (potentially
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
-
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
+```json
 {
   "mcpServers": {
     "mas-sequential-thinking": {
@@ -80,14 +85,17 @@ This server runs as a standard executable script that communicates via stdio, as
      "args": [
        "mcp-server-mas-sequential-thinking"
      ],
-      env": {
-        "
-        "
-        "
+      "env": {
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
      }
    }
  }
-}
+}
 ```
 
 ## Installation & Setup
@@ -101,10 +109,31 @@ This server runs as a standard executable script that communicates via stdio, as
 2. **Set Environment Variables:**
    Create a `.env` file in the root directory or export the variables:
    ```dotenv
-   #
-
-
+   # --- LLM Configuration ---
+   # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+   LLM_PROVIDER="deepseek"
 
+   # Provide the API key for the chosen provider:
+   # GROQ_API_KEY="your_groq_api_key"
+   DEEPSEEK_API_KEY="your_deepseek_api_key"
+   # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+   # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+   DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+   # Optional: Specify different models for Team Coordinator and Specialist Agents
+   # Defaults are set within the code based on the provider if these are not set.
+   # Example for Groq:
+   # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+   # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+   # Example for DeepSeek:
+   # DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+   # DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+   # Example for OpenRouter:
+   # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+   # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+   # --- External Tools ---
    # Required ONLY if the Researcher agent is used and needs Exa
    EXA_API_KEY="your_exa_api_key"
    ```
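The `env` guidance above corresponds to the `check_environment_variables()` helper added to `main.py` (diffed further down). A condensed sketch of that startup check, assuming only the variable names documented here:

```python
import os

# Which API key each provider needs, per the README guidance above and
# main.py's check_environment_variables() (see the main.py diff below).
REQUIRED_KEY = {
    "deepseek": "DEEPSEEK_API_KEY",
    "groq": "GROQ_API_KEY",
    "openrouter": "OPENROUTER_API_KEY",
}

provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
key_var = REQUIRED_KEY.get(provider)
if key_var and key_var not in os.environ:
    print(f"warning: {key_var} is not set; '{provider}' model initialization may fail")
```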
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/README.zh-CN.md RENAMED
@@ -63,8 +63,11 @@
 ## 先决条件
 
 * Python 3.10+
-* 访问兼容的 LLM API(为 `agno`
-* `
+* 访问兼容的 LLM API(为 `agno` 配置)。系统现在支持:
+    * **Groq:** 需要 `GROQ_API_KEY`。
+    * **DeepSeek:** 需要 `DEEPSEEK_API_KEY`。
+    * **OpenRouter:** 需要 `OPENROUTER_API_KEY`。
+    * 使用 `LLM_PROVIDER` 环境变量配置所需的提供商(默认为 `deepseek`)。
 * Exa API 密钥(如果使用研究员智能体的功能)
     * `EXA_API_KEY` 环境变量。
 * `uv` 包管理器(推荐)或 `pip`。
@@ -73,6 +76,8 @@
 
 此服务器作为标准可执行脚本运行,通过 stdio 进行通信,符合 MCP 的预期。确切的配置方法取决于您具体的 MCP 客户端实现。请查阅您客户端的文档以获取详细信息。
 
+`env` 部分应包含您选择的 `LLM_PROVIDER` 对应的 API 密钥。
+
 ```json
 {
   "mcpServers": {
@@ -82,9 +87,12 @@
       "mcp-server-mas-sequential-thinking"
     ],
     "env": {
-      "
-      "
-      "
+      "LLM_PROVIDER": "deepseek", // 或 "groq", "openrouter"
+      // "GROQ_API_KEY": "你的_groq_api_密钥", // 仅当 LLM_PROVIDER="groq" 时需要
+      "DEEPSEEK_API_KEY": "你的_deepseek_api_密钥", // 默认提供商
+      // "OPENROUTER_API_KEY": "你的_openrouter_api_密钥", // 仅当 LLM_PROVIDER="openrouter" 时需要
+      "DEEPSEEK_BASE_URL": "你的_base_url_如果需要", // 可选:如果为 DeepSeek 使用自定义端点
+      "EXA_API_KEY": "你的_exa_api_密钥" // 仅当使用 Exa 时需要
     }
   }
 }
@@ -104,10 +112,31 @@
 在根目录创建一个 `.env` 文件或导出变量:
 
 ```dotenv
-#
-
-
+# --- LLM 配置 ---
+# 选择 LLM 提供商: "deepseek" (默认), "groq", 或 "openrouter"
+LLM_PROVIDER="deepseek"
 
+# 提供所选提供商的 API 密钥:
+# GROQ_API_KEY="你的_groq_api_密钥"
+DEEPSEEK_API_KEY="你的_deepseek_api_密钥"
+# OPENROUTER_API_KEY="你的_openrouter_api_密钥"
+
+# 可选: 基础 URL 覆盖 (例如, 用于自定义 DeepSeek 端点)
+DEEPSEEK_BASE_URL="你的_base_url_如果需要"
+
+# 可选: 为团队协调器和专家智能体指定不同的模型
+# 如果未设置这些变量,则代码会根据提供商设置默认值。
+# Groq 示例:
+# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+# DeepSeek 示例:
+# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+# DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+# OpenRouter 示例:
+# OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+# OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+# --- 外部工具 ---
 # 仅当研究员智能体被使用且需要 Exa 时才必需
 EXA_API_KEY="你的_exa_api_密钥"
 ```
@@ -165,64 +194,8 @@ python 你的主脚本名称.py
 LLM 会迭代地与此工具交互:
 
 1. **LLM:** 使用 `sequential-thinking-starter` 提示和问题。
-2. **LLM:** 使用 `thoughtNumber: 1`、初始 `thought
-3. **服务器:** MAS 处理思考 -\>
+2. **LLM:** 使用 `thoughtNumber: 1`、初始 `thought`(例如,"规划分析...")、`totalThoughts` 预估、`nextThoughtNeeded: True` 调用 `sequentialthinking` 工具。
+3. **服务器:** MAS 处理思考 -\> 协调器综合响应并提供指导(例如,"分析计划完成。建议下一步研究 X。暂不推荐修订。")。
 4. **LLM:** 接收包含 `coordinatorResponse` 的 JSON 响应。
-5. **LLM:** 根据 `coordinatorResponse`
-6. **LLM:** 使用 `thoughtNumber: 2`、新的 `thought`、更新的 `totalThoughts`(如果需要)、`
-7. **服务器:** MAS 处理 -\> 协调器综合(例如,“研究完成。发现表明思考 \#1 的假设存在缺陷。建议:修订思考 \#1...”)。
-8. **LLM:** 接收响应,看到建议。
-9. **LLM:** 构思一个修订思考。
-10. **LLM:** 使用 `thoughtNumber: 3`、修订后的 `thought`、`isRevision: True`、`revisesThought: 1`、`nextThoughtNeeded: True` 调用 `sequentialthinking` 工具。
-11. **... 以此类推,可能根据需要进行分支或扩展。**
-
-### 工具响应格式
-
-该工具返回一个 JSON 字符串,包含:
-
-```json
-{
-  "processedThoughtNumber": int, // 处理的思考编号
-  "estimatedTotalThoughts": int, // 预估总思考数
-  "nextThoughtNeeded": bool, // 是否需要下一个思考
-  "coordinatorResponse": "来自智能体团队的综合输出,包括分析、发现和下一步指导...", // 协调器的综合响应
-  "branches": ["分支ID列表"], // 所有分支 ID 的列表
-  "thoughtHistoryLength": int, // 思考历史长度
-  "branchDetails": { // 分支详情
-    "currentBranchId": "main | branchId", // 当前分支 ID
-    "branchOriginThought": null | int, // 分支起源的思考编号
-    "allBranches": {"main": 数量, "branchId": 数量, ...} // 所有分支及其包含的思考数
-  },
-  "isRevision": bool, // 是否为修订
-  "revisesThought": null | int, // 修订的思考编号
-  "isBranch": bool, // 是否为分支操作产生的思考
-  "status": "success | validation_error | failed", // 状态
-  "error": "如果状态不是 success 时的错误信息" // 可选
-}
-```
-
-## 日志记录
-
-* 日志写入 `~/.sequential_thinking/logs/sequential_thinking.log`。
-* 使用 Python 标准的 `logging` 模块。
-* 包含轮转文件处理器(10MB 限制,5 个备份)和控制台处理器(INFO 级别)。
-* 日志包含时间戳、级别、记录器名称和消息,包括格式化的思考表示。
-
-## 开发
-
-(如果适用,在此处添加开发指南,例如设置开发环境、运行测试、代码检查等。)
-
-1. 克隆仓库。
-2. 设置虚拟环境。
-3. 安装依赖项,可能包括开发附加项:
-    ```bash
-    # 使用 uv
-    uv pip install -e ".[dev]"
-    # 使用 pip
-    pip install -e ".[dev]"
-    ```
-4. 运行代码检查器/格式化器/测试。
-
-## 许可证
-
-MIT
+5. **LLM:** 根据 `coordinatorResponse` 构思下一个思考(例如,"使用 Exa 研究 X...")。
+6. **LLM:** 使用 `thoughtNumber: 2`、新的 `thought`、更新的 `totalThoughts`(如果需要)、`
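Note that 0.2.0 removes the Chinese README's tool-response schema (the `### 工具响应格式` block above) without documenting a replacement. For readers who relied on it, a hedged sketch of how a client might consume a reply matching that removed 0.1.3-era schema; none of this shape is promised by 0.2.0:

```python
import json

def extract_guidance(raw_reply: str) -> str:
    """Parse a sequentialthinking reply per the schema documented in 0.1.3.

    Field names come from the removed README block above; 0.2.0 no longer
    documents this shape, so treat it as illustrative only.
    """
    reply = json.loads(raw_reply)
    if reply.get("status") != "success":
        raise RuntimeError(reply.get("error", "tool call failed"))
    # The coordinator's synthesized output drives the LLM's next thought.
    return reply["coordinatorResponse"]
```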
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/main.py RENAMED
@@ -4,11 +4,14 @@ import sys
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, AsyncIterator, Dict, List, Optional
+from typing import Any, AsyncIterator, Dict, List, Optional, Type
 
 from mcp.server.fastmcp import FastMCP
 from agno.agent import Agent
+from agno.models.base import Model
 from agno.models.deepseek import DeepSeek
+from agno.models.groq import Groq
+from agno.models.openrouter import OpenRouter
 from agno.team.team import Team
 from agno.tools.exa import ExaTools
 from agno.tools.thinking import ThinkingTools
@@ -16,12 +19,12 @@ from dotenv import load_dotenv
 from pydantic import (BaseModel, ConfigDict, Field, ValidationError,
                       field_validator, model_validator)
 
-# Add logging imports and setup
 import logging
 import logging.handlers
 from pathlib import Path
 
-
+load_dotenv()
+
 def setup_logging() -> logging.Logger:
     """
     Set up application logging with both file and console handlers.
@@ -66,14 +69,8 @@ def setup_logging() -> logging.Logger:
 
     return logger
 
-# Initialize logger
 logger = setup_logging()
 
-# Load environment variables from .env file
-load_dotenv()
-
-# --- Pydantic Model for Tool Input Schema ---
-
 class ThoughtData(BaseModel):
     """
     Represents the data structure for a single thought in the sequential
@@ -294,6 +291,44 @@ def format_thought_for_log(thought_data: ThoughtData) -> str:
 
 # --- Agno Multi-Agent Team Setup ---
 
+def get_model_config() -> tuple[Type[Model], str, str]:
+    """
+    Determines the LLM provider, team model ID, and agent model ID based on environment variables.
+
+    Returns:
+        A tuple containing:
+        - ModelClass: The Agno model class (e.g., DeepSeek, Groq, OpenRouter).
+        - team_model_id: The model ID for the team coordinator.
+        - agent_model_id: The model ID for the specialist agents.
+    """
+    provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    logger.info(f"Selected LLM Provider: {provider}")
+
+    if provider == "deepseek":
+        ModelClass = DeepSeek
+        # Use environment variables for DeepSeek model IDs if set, otherwise use defaults
+        team_model_id = os.environ.get("DEEPSEEK_TEAM_MODEL_ID", "deepseek-chat")
+        agent_model_id = os.environ.get("DEEPSEEK_AGENT_MODEL_ID", "deepseek-chat")
+        logger.info(f"Using DeepSeek: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    elif provider == "groq":
+        ModelClass = Groq
+        team_model_id = os.environ.get("GROQ_TEAM_MODEL_ID", "deepseek-r1-distill-llama-70b")
+        agent_model_id = os.environ.get("GROQ_AGENT_MODEL_ID", "deepseek-r1-distill-llama-70b")
+        logger.info(f"Using Groq: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    elif provider == "openrouter":
+        ModelClass = OpenRouter
+        team_model_id = os.environ.get("OPENROUTER_TEAM_MODEL_ID", "deepseek/deepseek-chat-v3-0324")
+        agent_model_id = os.environ.get("OPENROUTER_AGENT_MODEL_ID", "deepseek/deepseek-chat-v3-0324")
+        logger.info(f"Using OpenRouter: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    else:
+        logger.error(f"Unsupported LLM_PROVIDER: {provider}. Defaulting to DeepSeek.")
+        ModelClass = DeepSeek
+        team_model_id = "deepseek-chat"
+        agent_model_id = "deepseek-chat"
+
+    return ModelClass, team_model_id, agent_model_id
+
+
 def create_sequential_thinking_team() -> Team:
     """
     Creates and configures the Agno multi-agent team for sequential thinking,
@@ -303,12 +338,13 @@ def create_sequential_thinking_team() -> Team:
     An initialized Team instance.
     """
     try:
-
-
-
+        ModelClass, team_model_id, agent_model_id = get_model_config()
+        team_model_instance = ModelClass(id=team_model_id)
+        agent_model_instance = ModelClass(id=agent_model_id)
+
     except Exception as e:
-        logger.error(f"Error initializing
-        logger.error("Please ensure the necessary API keys and configurations are set.")
+        logger.error(f"Error initializing models: {e}")
+        logger.error("Please ensure the necessary API keys and configurations are set for the selected provider ({os.environ.get('LLM_PROVIDER', 'deepseek')}).")
         sys.exit(1)
 
 # REMOVED the separate Coordinator Agent definition.
@@ -334,7 +370,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on fulfilling the delegated planning sub-task accurately and efficiently.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -358,7 +394,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on accuracy and relevance for the delegated research request.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -382,7 +418,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on depth and clarity for the delegated analytical task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -407,7 +443,7 @@
             " 8. Return your response to the Team Coordinator.",
             "Focus on rigorous and constructive critique for the delegated evaluation task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
    )
@@ -430,7 +466,7 @@
             " 6. Return your response to the Team Coordinator.",
             "Focus on creating clarity and coherence for the delegated synthesis task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -441,7 +477,7 @@
         name="SequentialThinkingTeam",
         mode="coordinate",
         members=[planner, researcher, analyzer, critic, synthesizer], # ONLY specialist agents
-        model=
+        model=team_model_instance, # Model for the Team's coordination logic
         description="You are the Coordinator of a specialist team processing sequential thoughts. Your role is to manage the flow, delegate tasks, and synthesize results.",
         instructions=[
             f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
@@ -515,9 +551,17 @@ async def app_lifespan() -> AsyncIterator[None]:
     """Manages the application lifecycle."""
     global app_context
     logger.info("Initializing application resources (Coordinate Mode)...")
-
-
-
+    try:
+        team = create_sequential_thinking_team()
+        app_context = AppContext(team=team)
+        provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+        logger.info(f"Agno team initialized in coordinate mode using provider: {provider}.")
+    except Exception as e:
+        logger.critical(f"Failed to initialize Agno team during lifespan setup: {e}", exc_info=True)
+        # Decide how to handle this - re-raise, exit, or continue without a team?
+        # For now, re-raise to prevent server starting in a broken state.
+        raise e
+
     try:
         yield
     finally:
@@ -598,7 +642,20 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
     global app_context
     if not app_context or not app_context.team:
         logger.error("Application context or Agno team not initialized during tool call.")
-
+        # Attempt re-initialization cautiously, or fail hard.
+        # Let's try re-initialization if app_lifespan wasn't used or failed silently.
+        logger.warning("Attempting to re-initialize team due to missing context...")
+        try:
+            team = create_sequential_thinking_team()
+            app_context = AppContext(team=team) # Re-create context
+            logger.info("Successfully re-initialized team and context.")
+        except Exception as init_err:
+            logger.critical(f"Failed to re-initialize Agno team during tool call: {init_err}", exc_info=True)
+            return json.dumps({
+                "error": "Critical Error: Application context not available and re-initialization failed.",
+                "status": "critical_failure"
+            }, indent=2)
+        # Or raise Exception("Critical Error: Application context not available.")
 
     MIN_TOTAL_THOUGHTS = 5 # Keep a minimum suggestion
 
@@ -731,7 +788,9 @@
 
 def run():
     """Initializes and runs the MCP server in coordinate mode."""
-
+    selected_provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    logger.info(f"Using provider: {selected_provider}")
+    logger.info(f"Initializing Sequential Thinking Server (Coordinate Mode) with Provider: {selected_provider}...")
 
     global app_context
     # Initialize application resources using the lifespan manager implicitly if running via framework
@@ -742,7 +801,7 @@
     try:
         team = create_sequential_thinking_team()
         app_context = AppContext(team=team)
-        logger.info("Agno team initialized directly in coordinate mode.")
+        logger.info(f"Agno team initialized directly in coordinate mode using provider: {selected_provider}.")
     except Exception as e:
         logger.critical(f"Failed to initialize Agno team: {e}", exc_info=True)
         sys.exit(1)
@@ -759,27 +818,37 @@
     logger.info("Shutting down application resources...")
     app_context = None # Clean up context if initialized directly
 
-
-
-
-
-
-
+def check_environment_variables():
+    """Checks for necessary environment variables based on the selected provider."""
+    provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    api_key_var = ""
+    base_url_var = "" # Some providers might not strictly need a base URL override
+
+    if provider == "deepseek":
+        api_key_var = "DEEPSEEK_API_KEY"
+    elif provider == "groq":
+        api_key_var = "GROQ_API_KEY"
+    elif provider == "openrouter":
+        api_key_var = "OPENROUTER_API_KEY"
+    if api_key_var and api_key_var not in os.environ:
+        logger.warning(f"{api_key_var} environment variable not found. Model initialization for '{provider}' might fail.")
     try:
-
-
-
-        for
-        if hasattr(member, 'tools') and member.tools:
-            if any(isinstance(t, ExaTools) for t in member.tools):
-                uses_exa = True
-                break # Found it, no need to check further
+        ModelClass, _, _ = get_model_config() # Just need the class for dummy init
+        dummy_model = ModelClass(id="dummy-check") # Use a placeholder ID
+        researcher_for_check = Agent(name="CheckAgent", tools=[ExaTools()], model=dummy_model)
+        uses_exa = any(isinstance(t, ExaTools) for t in researcher_for_check.tools)
 
         if uses_exa and "EXA_API_KEY" not in os.environ:
             logger.warning("EXA_API_KEY environment variable not found, but ExaTools are configured in a team member. Researcher agent might fail.")
+    except Exception as e:
+        logger.error(f"Could not perform ExaTools check due to an error: {e}")
 
-
+
+if __name__ == "__main__":
+    check_environment_variables()
+    try:
        run()
    except Exception as e:
-        logger.critical(f"Failed during
+        logger.critical(f"Failed during server run: {e}", exc_info=True)
        sys.exit(1)
+
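Taken together, the `main.py` changes make provider switching purely an environment concern. A hypothetical smoke test of the new configuration path (assumes `main.py` is importable and the `agno` model classes from the diff are installed; the Groq key below is a placeholder, not a working credential):

```python
import os

# Configure the environment before importing main (load_dotenv runs on import).
os.environ["LLM_PROVIDER"] = "groq"
os.environ["GROQ_API_KEY"] = "placeholder-key"        # not a real credential
os.environ["GROQ_TEAM_MODEL_ID"] = "llama3-70b-8192"  # explicit override

from main import get_model_config

ModelClass, team_id, agent_id = get_model_config()
assert ModelClass.__name__ == "Groq"
assert team_id == "llama3-70b-8192"                   # from the override
assert agent_id == "deepseek-r1-distill-llama-70b"    # provider default in the diff
```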
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.0}/pyproject.toml RENAMED
@@ -1,6 +1,6 @@
 [project]
 name = "mcp-server-mas-sequential-thinking"
-version = "0.1.3"
+version = "0.2.0"
 description = "MCP Agent Implementation for Sequential Thinking"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -13,6 +13,7 @@ dependencies = [
     "exa-py",
     "python-dotenv",
     "mcp",
+    "groq",
 ]
 
 [project.optional-dependencies]
@@ -32,11 +33,3 @@ build-backend = "hatchling.build"
 
 [tool.hatch.build.targets.wheel]
 include = ["main.py"]
-
-[tool.uv]
-dev-dependencies = [
-    "pytest",
-    "black",
-    "isort",
-    "mypy",
-]