mcp-server-mas-sequential-thinking 0.1.3__tar.gz → 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_server_mas_sequential_thinking-0.2.1/.env.example +27 -0
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/PKG-INFO +48 -12
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/README.md +46 -11
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/README.zh-CN.md +47 -68
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/main.py +114 -45
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/pyproject.toml +2 -9
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/uv.lock +169 -166
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/.gitignore +0 -0
- {mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/.python-version +0 -0
mcp_server_mas_sequential_thinking-0.2.1/.env.example
ADDED
@@ -0,0 +1,27 @@
+# --- LLM Configuration ---
+# Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+LLM_PROVIDER="deepseek"
+
+# Provide the API key for the chosen provider:
+# GROQ_API_KEY="your_groq_api_key"
+DEEPSEEK_API_KEY="your_deepseek_api_key"
+# OPENROUTER_API_KEY="your_openrouter_api_key"
+
+# Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+# Optional: Specify different models for Team Coordinator and Specialist Agents
+# Defaults are set within the code based on the provider if these are not set.
+# Example for Groq:
+# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+# Example for DeepSeek:
+# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+# DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+# Example for OpenRouter:
+# OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+# OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+# --- External Tools ---
+# Required ONLY if the Researcher agent is used and needs Exa
+EXA_API_KEY="your_exa_api_key"
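The new `.env.example` documents the provider-selection contract. As a minimal sketch, assuming only `python-dotenv` (a declared dependency), the variables above might be consumed like this; the helper name `read_llm_config` and its error handling are illustrative, not code from `main.py`:

```python
import os

from dotenv import load_dotenv

# Map each supported provider to the API-key variable documented above.
API_KEY_VARS = {
    "deepseek": "DEEPSEEK_API_KEY",
    "groq": "GROQ_API_KEY",
    "openrouter": "OPENROUTER_API_KEY",
}


def read_llm_config() -> dict:
    load_dotenv()  # pick up the .env file shown above, if present
    provider = os.getenv("LLM_PROVIDER", "deepseek").lower()
    if provider not in API_KEY_VARS:
        raise ValueError(f"Unsupported LLM_PROVIDER: {provider!r}")
    api_key = os.getenv(API_KEY_VARS[provider])
    if not api_key:
        raise RuntimeError(
            f"{API_KEY_VARS[provider]} must be set when LLM_PROVIDER={provider!r}"
        )
    return {
        "provider": provider,
        "api_key": api_key,
        # Optional override; only meaningful for DeepSeek per the comments above.
        "base_url": os.getenv("DEEPSEEK_BASE_URL"),
    }
```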
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/PKG-INFO
RENAMED
@@ -1,12 +1,13 @@
 Metadata-Version: 2.4
 Name: mcp-server-mas-sequential-thinking
-Version: 0.1.3
+Version: 0.2.1
 Summary: MCP Agent Implementation for Sequential Thinking
 Author-email: Frad LEE <fradser@gmail.com>
 Requires-Python: >=3.10
 Requires-Dist: agno
 Requires-Dist: asyncio
 Requires-Dist: exa-py
+Requires-Dist: groq
 Requires-Dist: mcp
 Requires-Dist: python-dotenv
 Provides-Extra: dev
@@ -80,8 +81,11 @@ This parallel processing leads to substantially higher token usage (potentially
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno
-*
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+    * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API Key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -90,7 +94,9 @@ This parallel processing leads to substantially higher token usage (potentially
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
-
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
+```json
 {
   "mcpServers": {
     "mas-sequential-thinking": {
@@ -98,14 +104,17 @@ This server runs as a standard executable script that communicates via stdio, as
       "args": [
         "mcp-server-mas-sequential-thinking"
       ],
-      env": {
-        "
-        "
-        "
+      "env": {
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
       }
     }
   }
-}
+}
 ```
 
 ## Installation & Setup
@@ -119,14 +128,41 @@ This server runs as a standard executable script that communicates via stdio, as
 2. **Set Environment Variables:**
     Create a `.env` file in the root directory or export the variables:
     ```dotenv
-    #
-
-
+    # --- LLM Configuration ---
+    # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+    LLM_PROVIDER="deepseek"
 
+    # Provide the API key for the chosen provider:
+    # GROQ_API_KEY="your_groq_api_key"
+    DEEPSEEK_API_KEY="your_deepseek_api_key"
+    # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+    # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+    DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+    # Optional: Specify different models for Team Coordinator and Specialist Agents
+    # Defaults are set within the code based on the provider if these are not set.
+    # Example for Groq:
+    # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+    # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+    # Example for DeepSeek:
+    # DEEPSEEK_TEAM_MODEL_ID="deepseek-reasoner" # Recommended for coordination
+    # DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
+    # Example for OpenRouter:
+    # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+    # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+    # --- External Tools ---
     # Required ONLY if the Researcher agent is used and needs Exa
     EXA_API_KEY="your_exa_api_key"
     ```
 
+    **Note on Model Selection:**
+
+    * The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities. Using a more powerful model (like `deepseek-reasoner`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it's slower or more expensive.
+    * The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks. You might choose a faster or more cost-effective model (like `deepseek-chat`, `claude-3-sonnet`, `llama3-70b`) for specialists, depending on the complexity of the tasks they typically handle and your budget/performance requirements.
+    * The defaults provided in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points. Experimentation is encouraged to find the optimal balance for your specific use case.
+
 3. **Install Dependencies:**
 
     * **Using `uv` (Recommended):**
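The model-selection note above implies a simple resolution rule: a provider-specific environment variable wins, otherwise a per-provider default applies. Here is a hedged sketch of that rule, using the example IDs from the `.env` block as fallbacks; the function name `resolve_model_ids` and the exact defaults are assumptions, not the actual `main.py` code:

```python
import os

# Fallback model IDs, mirroring the examples in the .env block above.
PROVIDER_DEFAULTS = {
    "deepseek":   {"team": "deepseek-reasoner", "agent": "deepseek-chat"},
    "groq":       {"team": "llama3-70b-8192",   "agent": "llama3-8b-8192"},
    "openrouter": {"team": "anthropic/claude-3-haiku-20240307",
                   "agent": "google/gemini-flash-1.5"},
}


def resolve_model_ids(provider: str) -> tuple[str, str]:
    """Return (team_model_id, agent_model_id) for the given provider."""
    prefix = provider.upper()  # e.g., "deepseek" -> DEEPSEEK_TEAM_MODEL_ID
    defaults = PROVIDER_DEFAULTS[provider]
    team_id = os.getenv(f"{prefix}_TEAM_MODEL_ID", defaults["team"])
    agent_id = os.getenv(f"{prefix}_AGENT_MODEL_ID", defaults["agent"])
    return team_id, agent_id
```

The design intent, per the note: the stronger `team` model coordinates and delegates, while the cheaper `agent` model runs the focused specialist sub-tasks.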
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/README.md
RENAMED
@@ -62,8 +62,11 @@ This parallel processing leads to substantially higher token usage (potentially
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno
-*
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+    * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API Key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -72,7 +75,9 @@ This parallel processing leads to substantially higher token usage (potentially
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
-
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
+```json
 {
   "mcpServers": {
     "mas-sequential-thinking": {
@@ -80,14 +85,17 @@ This server runs as a standard executable script that communicates via stdio, as
       "args": [
         "mcp-server-mas-sequential-thinking"
       ],
-      env": {
-        "
-        "
-        "
+      "env": {
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
       }
     }
   }
-}
+}
 ```
 
 ## Installation & Setup
@@ -101,14 +109,41 @@ This server runs as a standard executable script that communicates via stdio, as
 2. **Set Environment Variables:**
     Create a `.env` file in the root directory or export the variables:
     ```dotenv
-    #
-
-
+    # --- LLM Configuration ---
+    # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+    LLM_PROVIDER="deepseek"
 
+    # Provide the API key for the chosen provider:
+    # GROQ_API_KEY="your_groq_api_key"
+    DEEPSEEK_API_KEY="your_deepseek_api_key"
+    # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+    # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+    DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+    # Optional: Specify different models for Team Coordinator and Specialist Agents
+    # Defaults are set within the code based on the provider if these are not set.
+    # Example for Groq:
+    # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+    # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+    # Example for DeepSeek:
+    # DEEPSEEK_TEAM_MODEL_ID="deepseek-reasoner" # Recommended for coordination
+    # DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
+    # Example for OpenRouter:
+    # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+    # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+    # --- External Tools ---
     # Required ONLY if the Researcher agent is used and needs Exa
     EXA_API_KEY="your_exa_api_key"
     ```
 
+    **Note on Model Selection:**
+
+    * The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities. Using a more powerful model (like `deepseek-reasoner`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it's slower or more expensive.
+    * The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks. You might choose a faster or more cost-effective model (like `deepseek-chat`, `claude-3-sonnet`, `llama3-70b`) for specialists, depending on the complexity of the tasks they typically handle and your budget/performance requirements.
+    * The defaults provided in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points. Experimentation is encouraged to find the optimal balance for your specific use case.
+
 3. **Install Dependencies:**
 
     * **Using `uv` (Recommended):**
{mcp_server_mas_sequential_thinking-0.1.3 → mcp_server_mas_sequential_thinking-0.2.1}/README.zh-CN.md
RENAMED
@@ -63,8 +63,11 @@
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno`
-* `
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+    * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -73,6 +76,8 @@
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
 ```json
 {
   "mcpServers": {
@@ -82,9 +87,12 @@
         "mcp-server-mas-sequential-thinking"
       ],
       "env": {
-        "
-        "
-        "
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: if using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
       }
     }
   }
@@ -104,14 +112,41 @@
     Create a `.env` file in the root directory or export the variables:
 
     ```dotenv
-    #
-
-
+    # --- LLM Configuration ---
+    # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+    LLM_PROVIDER="deepseek"
 
+    # Provide the API key for the chosen provider:
+    # GROQ_API_KEY="your_groq_api_key"
+    DEEPSEEK_API_KEY="your_deepseek_api_key"
+    # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+    # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+    DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+    # Optional: Specify different models for the Team Coordinator and Specialist Agents
+    # If these are not set, the code falls back to provider-based defaults.
+    # Example for Groq:
+    # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+    # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+    # Example for DeepSeek:
+    # DEEPSEEK_TEAM_MODEL_ID="deepseek-reasoner" # Recommended for coordination
+    # DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
+    # Example for OpenRouter:
+    # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+    # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+    # --- External Tools ---
     # Required ONLY if the Researcher agent is used and needs Exa
     EXA_API_KEY="your_exa_api_key"
     ```
 
+    **Note on Model Selection:**
+
+    * The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities, so a more powerful model (such as `deepseek-reasoner`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it is slower or more expensive.
+    * The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks, so a faster or more cost-effective model (such as `deepseek-chat`, `claude-3-sonnet`, `llama3-70b`) may suffice, depending on the complexity of their typical tasks and your budget/performance requirements.
+    * The defaults in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points; experiment to find the best balance for your specific use case.
+
 3. **Install Dependencies:**
 
     * **Using `uv` (Recommended):**
@@ -165,64 +200,8 @@ python your_main_script_name.py
 The LLM interacts with this tool iteratively:
 
 1. **LLM:** Uses the `sequential-thinking-starter` prompt with the problem.
-2. **LLM:** Calls the `sequentialthinking` tool with `thoughtNumber: 1`, an initial `thought
-3. **Server:** The MAS processes the thought ->
+2. **LLM:** Calls the `sequentialthinking` tool with `thoughtNumber: 1`, an initial `thought` (e.g., "Plan the analysis..."), an estimated `totalThoughts`, and `nextThoughtNeeded: True`.
+3. **Server:** The MAS processes the thought -> the Coordinator synthesizes a response and provides guidance (e.g., "Analysis plan complete. Suggest researching X next. No revision recommended yet.").
 4. **LLM:** Receives the JSON response containing `coordinatorResponse`.
-5. **LLM:** Based on the `coordinatorResponse`
-6. **LLM:** Calls the `sequentialthinking` tool with `thoughtNumber: 2`, a new `thought`, an updated `totalThoughts` (if needed), `
-7. **Server:** The MAS processes it -> the Coordinator synthesizes (e.g., "Research complete. Findings suggest a flaw in the assumption of thought #1. Recommendation: revise thought #1...").
-8. **LLM:** Receives the response and sees the recommendation.
-9. **LLM:** Formulates a revision thought.
-10. **LLM:** Calls the `sequentialthinking` tool with `thoughtNumber: 3`, the revised `thought`, `isRevision: True`, `revisesThought: 1`, and `nextThoughtNeeded: True`.
-11. **... and so on, branching or extending as needed.**
-
-### Tool Response Format
-
-The tool returns a JSON string containing:
-
-```json
-{
-  "processedThoughtNumber": int, // number of the processed thought
-  "estimatedTotalThoughts": int, // estimated total thoughts
-  "nextThoughtNeeded": bool, // whether another thought is needed
-  "coordinatorResponse": "Synthesized output from the agent team, including analysis, findings, and guidance for the next step...", // the Coordinator's synthesized response
-  "branches": ["list of branch IDs"], // list of all branch IDs
-  "thoughtHistoryLength": int, // length of the thought history
-  "branchDetails": { // branch details
-    "currentBranchId": "main | branchId", // current branch ID
-    "branchOriginThought": null | int, // thought number where the branch originated
-    "allBranches": {"main": count, "branchId": count, ...} // all branches and their thought counts
-  },
-  "isRevision": bool, // whether this is a revision
-  "revisesThought": null | int, // the thought number being revised
-  "isBranch": bool, // whether this thought resulted from a branch operation
-  "status": "success | validation_error | failed", // status
-  "error": "Error message if status is not success" // optional
-}
-```
-
-## Logging
-
-* Logs are written to `~/.sequential_thinking/logs/sequential_thinking.log`.
-* Uses Python's standard `logging` module.
-* Includes a rotating file handler (10 MB limit, 5 backups) and a console handler (INFO level).
-* Log entries include timestamps, levels, logger names, and messages, including formatted thought representations.
-
-## Development
-
-(Add development guidelines here if applicable, e.g., setting up a dev environment, running tests, linting.)
-
-1. Clone the repository.
-2. Set up a virtual environment.
-3. Install dependencies, including the dev extras:
-    ```bash
-    # Using uv
-    uv pip install -e ".[dev]"
-    # Using pip
-    pip install -e ".[dev]"
-    ```
-4. Run linters/formatters/tests.
-
-## License
-
-MIT
+5. **LLM:** Formulates the next thought based on the `coordinatorResponse` (e.g., "Research X using Exa...").
+6. **LLM:** Calls the `sequentialthinking` tool with `thoughtNumber: 2`, a new `thought`, an updated `totalThoughts` (if needed), `