mcp-server-mas-sequential-thinking 0.2.0__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/PKG-INFO +11 -5
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/README.md +10 -4
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/README.zh-CN.md +10 -4
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/main.py +6 -6
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/pyproject.toml +1 -1
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/uv.lock +1 -1
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.env.example +0 -0
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.gitignore +0 -0
- {mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.python-version +0 -0
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: mcp-server-mas-sequential-thinking
|
3
|
-
Version: 0.2.0
|
3
|
+
Version: 0.2.2
|
4
4
|
Summary: MCP Agent Implementation for Sequential Thinking
|
5
5
|
Author-email: Frad LEE <fradser@gmail.com>
|
6
6
|
Requires-Python: >=3.10
|
@@ -146,17 +146,23 @@ The `env` section should include the API key for your chosen `LLM_PROVIDER`.
|
|
146
146
|
# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
|
147
147
|
# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
|
148
148
|
# Example for DeepSeek:
|
149
|
-
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
|
150
|
-
# DEEPSEEK_AGENT_MODEL_ID="deepseek-
|
149
|
+
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat" # Note: `deepseek-reasoner` is not recommended as it doesn't support function calling
|
150
|
+
# DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
|
151
151
|
# Example for OpenRouter:
|
152
|
-
# OPENROUTER_TEAM_MODEL_ID="
|
153
|
-
# OPENROUTER_AGENT_MODEL_ID="
|
152
|
+
# OPENROUTER_TEAM_MODEL_ID="deepseek/deepseek-r1"
|
153
|
+
# OPENROUTER_AGENT_MODEL_ID="deepseek/deepseek-chat-v3-0324"
|
154
154
|
|
155
155
|
# --- External Tools ---
|
156
156
|
# Required ONLY if the Researcher agent is used and needs Exa
|
157
157
|
EXA_API_KEY="your_exa_api_key"
|
158
158
|
```
|
159
159
|
|
160
|
+
**Note on Model Selection:**
|
161
|
+
|
162
|
+
* The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities. Using a more powerful model (like `deepseek-r1`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it's slower or more expensive.
|
163
|
+
* The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks. You might choose a faster or more cost-effective model (like `deepseek-v3`, `claude-3-sonnet`, `llama3-70b`) for specialists, depending on the complexity of the tasks they typically handle and your budget/performance requirements.
|
164
|
+
* The defaults provided in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points. Experimentation is encouraged to find the optimal balance for your specific use case.
|
165
|
+
|
160
166
|
3. **Install Dependencies:**
|
161
167
|
|
162
168
|
* **Using `uv` (Recommended):**
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/README.md
RENAMED
@@ -127,17 +127,23 @@ The `env` section should include the API key for your chosen `LLM_PROVIDER`.
|
|
127
127
|
# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
|
128
128
|
# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
|
129
129
|
# Example for DeepSeek:
|
130
|
-
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
|
131
|
-
# DEEPSEEK_AGENT_MODEL_ID="deepseek-
|
130
|
+
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat" # Note: `deepseek-reasoner` is not recommended as it doesn't support function calling
|
131
|
+
# DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
|
132
132
|
# Example for OpenRouter:
|
133
|
-
# OPENROUTER_TEAM_MODEL_ID="
|
134
|
-
# OPENROUTER_AGENT_MODEL_ID="
|
133
|
+
# OPENROUTER_TEAM_MODEL_ID="deepseek/deepseek-r1"
|
134
|
+
# OPENROUTER_AGENT_MODEL_ID="deepseek/deepseek-chat-v3-0324"
|
135
135
|
|
136
136
|
# --- External Tools ---
|
137
137
|
# Required ONLY if the Researcher agent is used and needs Exa
|
138
138
|
EXA_API_KEY="your_exa_api_key"
|
139
139
|
```
|
140
140
|
|
141
|
+
**Note on Model Selection:**
|
142
|
+
|
143
|
+
* The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities. Using a more powerful model (like `deepseek-r1`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it's slower or more expensive.
|
144
|
+
* The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks. You might choose a faster or more cost-effective model (like `deepseek-v3`, `claude-3-sonnet`, `llama3-70b`) for specialists, depending on the complexity of the tasks they typically handle and your budget/performance requirements.
|
145
|
+
* The defaults provided in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points. Experimentation is encouraged to find the optimal balance for your specific use case.
|
146
|
+
|
141
147
|
3. **Install Dependencies:**
|
142
148
|
|
143
149
|
* **Using `uv` (Recommended):**
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/README.zh-CN.md
RENAMED
@@ -130,17 +130,23 @@
|
|
130
130
|
# GROQ_TEAM_MODEL_ID="llama3-70b-8192"
|
131
131
|
# GROQ_AGENT_MODEL_ID="llama3-8b-8192"
|
132
132
|
# DeepSeek 示例:
|
133
|
-
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
|
134
|
-
# DEEPSEEK_AGENT_MODEL_ID="deepseek-
|
133
|
+
# DEEPSEEK_TEAM_MODEL_ID="deepseek-chat" # 注意:不推荐使用 `deepseek-reasoner`,因为它不支持函数调用
|
134
|
+
# DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # 推荐用于专家智能体
|
135
135
|
# OpenRouter 示例:
|
136
|
-
# OPENROUTER_TEAM_MODEL_ID="
|
137
|
-
# OPENROUTER_AGENT_MODEL_ID="
|
136
|
+
# OPENROUTER_TEAM_MODEL_ID="deepseek/deepseek-r1"
|
137
|
+
# OPENROUTER_AGENT_MODEL_ID="deepseek/deepseek-chat-v3-0324"
|
138
138
|
|
139
139
|
# --- 外部工具 ---
|
140
140
|
# 仅当研究员智能体被使用且需要 Exa 时才必需
|
141
141
|
EXA_API_KEY="你的_exa_api_密钥"
|
142
142
|
```
|
143
143
|
|
144
|
+
**关于模型选择的说明:**
|
145
|
+
|
146
|
+
* `TEAM_MODEL_ID` 由协调器(`Team` 对象本身)使用。该角色需要强大的推理、综合和委派能力。使用更强大的模型(如 `deepseek-r1`、`claude-3-opus` 或 `gpt-4-turbo`)通常更有益,即使它可能更慢或更昂贵。
|
147
|
+
* `AGENT_MODEL_ID` 由专家智能体(规划器、研究员等)使用。这些智能体处理更集中的子任务。您可以为专家选择更快或更具成本效益的模型(如 `deepseek-v3`、`claude-3-sonnet`、`llama3-70b`),具体取决于它们通常处理的任务复杂性以及您的预算/性能要求。
|
148
|
+
* `main.py` 中提供的默认值(例如,使用 DeepSeek 时的 `deepseek-chat`)是起点。鼓励进行实验,以找到适合您特定用例的最佳平衡点。
|
149
|
+
|
144
150
|
3. **安装依赖:**
|
145
151
|
|
146
152
|
* **使用 `uv` (推荐):**
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/main.py
RENAMED
@@ -313,12 +313,12 @@ def get_model_config() -> tuple[Type[Model], str, str]:
|
|
313
313
|
elif provider == "groq":
|
314
314
|
ModelClass = Groq
|
315
315
|
team_model_id = os.environ.get("GROQ_TEAM_MODEL_ID", "deepseek-r1-distill-llama-70b")
|
316
|
-
agent_model_id = os.environ.get("GROQ_AGENT_MODEL_ID", "
|
316
|
+
agent_model_id = os.environ.get("GROQ_AGENT_MODEL_ID", "qwen-2.5-32b")
|
317
317
|
logger.info(f"Using Groq: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
|
318
318
|
elif provider == "openrouter":
|
319
319
|
ModelClass = OpenRouter
|
320
320
|
team_model_id = os.environ.get("OPENROUTER_TEAM_MODEL_ID", "deepseek/deepseek-chat-v3-0324")
|
321
|
-
agent_model_id = os.environ.get("OPENROUTER_AGENT_MODEL_ID", "deepseek/deepseek-
|
321
|
+
agent_model_id = os.environ.get("OPENROUTER_AGENT_MODEL_ID", "deepseek/deepseek-r1")
|
322
322
|
logger.info(f"Using OpenRouter: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
|
323
323
|
else:
|
324
324
|
logger.error(f"Unsupported LLM_PROVIDER: {provider}. Defaulting to DeepSeek.")
|
@@ -654,7 +654,7 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
|
|
654
654
|
return json.dumps({
|
655
655
|
"error": "Critical Error: Application context not available and re-initialization failed.",
|
656
656
|
"status": "critical_failure"
|
657
|
-
}, indent=2)
|
657
|
+
}, indent=2, ensure_ascii=False)
|
658
658
|
# Or raise Exception("Critical Error: Application context not available.")
|
659
659
|
|
660
660
|
MIN_TOTAL_THOUGHTS = 5 # Keep a minimum suggestion
|
@@ -768,7 +768,7 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
|
|
768
768
|
"status": "success"
|
769
769
|
}
|
770
770
|
|
771
|
-
return json.dumps(result_data, indent=2)
|
771
|
+
return json.dumps(result_data, indent=2, ensure_ascii=False)
|
772
772
|
|
773
773
|
except ValidationError as e:
|
774
774
|
logger.error(f"Validation Error processing tool call: {e}")
|
@@ -776,13 +776,13 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
|
|
776
776
|
return json.dumps({
|
777
777
|
"error": f"Input validation failed: {e}",
|
778
778
|
"status": "validation_error"
|
779
|
-
}, indent=2)
|
779
|
+
}, indent=2, ensure_ascii=False)
|
780
780
|
except Exception as e:
|
781
781
|
logger.exception(f"Error processing tool call") # Log full traceback
|
782
782
|
return json.dumps({
|
783
783
|
"error": f"An unexpected error occurred: {str(e)}",
|
784
784
|
"status": "failed"
|
785
|
-
}, indent=2)
|
785
|
+
}, indent=2, ensure_ascii=False)
|
786
786
|
|
787
787
|
# --- Main Execution ---
|
788
788
|
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.env.example
RENAMED
File without changes
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.gitignore
RENAMED
File without changes
|
{mcp_server_mas_sequential_thinking-0.2.0 → mcp_server_mas_sequential_thinking-0.2.2}/.python-version
RENAMED
File without changes
|