mcp-server-mas-sequential-thinking 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- main.py +111 -42
- {mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/METADATA +42 -12
- mcp_server_mas_sequential_thinking-0.2.0.dist-info/RECORD +5 -0
- mcp_server_mas_sequential_thinking-0.1.3.dist-info/RECORD +0 -5
- {mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/WHEEL +0 -0
- {mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/entry_points.txt +0 -0
main.py
CHANGED
@@ -4,11 +4,14 @@ import sys
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Any, AsyncIterator, Dict, List, Optional
+from typing import Any, AsyncIterator, Dict, List, Optional, Type
 
 from mcp.server.fastmcp import FastMCP
 from agno.agent import Agent
+from agno.models.base import Model
 from agno.models.deepseek import DeepSeek
+from agno.models.groq import Groq
+from agno.models.openrouter import OpenRouter
 from agno.team.team import Team
 from agno.tools.exa import ExaTools
 from agno.tools.thinking import ThinkingTools
@@ -16,12 +19,12 @@ from dotenv import load_dotenv
 from pydantic import (BaseModel, ConfigDict, Field, ValidationError,
                       field_validator, model_validator)
 
-# Add logging imports and setup
 import logging
 import logging.handlers
 from pathlib import Path
 
-
+load_dotenv()
+
 def setup_logging() -> logging.Logger:
     """
     Set up application logging with both file and console handlers.
@@ -66,14 +69,8 @@ def setup_logging() -> logging.Logger:
 
     return logger
 
-# Initialize logger
 logger = setup_logging()
 
-# Load environment variables from .env file
-load_dotenv()
-
-# --- Pydantic Model for Tool Input Schema ---
-
 class ThoughtData(BaseModel):
     """
     Represents the data structure for a single thought in the sequential
@@ -294,6 +291,44 @@ def format_thought_for_log(thought_data: ThoughtData) -> str:
 
 # --- Agno Multi-Agent Team Setup ---
 
+def get_model_config() -> tuple[Type[Model], str, str]:
+    """
+    Determines the LLM provider, team model ID, and agent model ID based on environment variables.
+
+    Returns:
+        A tuple containing:
+        - ModelClass: The Agno model class (e.g., DeepSeek, Groq, OpenRouter).
+        - team_model_id: The model ID for the team coordinator.
+        - agent_model_id: The model ID for the specialist agents.
+    """
+    provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    logger.info(f"Selected LLM Provider: {provider}")
+
+    if provider == "deepseek":
+        ModelClass = DeepSeek
+        # Use environment variables for DeepSeek model IDs if set, otherwise use defaults
+        team_model_id = os.environ.get("DEEPSEEK_TEAM_MODEL_ID", "deepseek-chat")
+        agent_model_id = os.environ.get("DEEPSEEK_AGENT_MODEL_ID", "deepseek-chat")
+        logger.info(f"Using DeepSeek: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    elif provider == "groq":
+        ModelClass = Groq
+        team_model_id = os.environ.get("GROQ_TEAM_MODEL_ID", "deepseek-r1-distill-llama-70b")
+        agent_model_id = os.environ.get("GROQ_AGENT_MODEL_ID", "deepseek-r1-distill-llama-70b")
+        logger.info(f"Using Groq: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    elif provider == "openrouter":
+        ModelClass = OpenRouter
+        team_model_id = os.environ.get("OPENROUTER_TEAM_MODEL_ID", "deepseek/deepseek-chat-v3-0324")
+        agent_model_id = os.environ.get("OPENROUTER_AGENT_MODEL_ID", "deepseek/deepseek-chat-v3-0324")
+        logger.info(f"Using OpenRouter: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
+    else:
+        logger.error(f"Unsupported LLM_PROVIDER: {provider}. Defaulting to DeepSeek.")
+        ModelClass = DeepSeek
+        team_model_id = "deepseek-chat"
+        agent_model_id = "deepseek-chat"
+
+    return ModelClass, team_model_id, agent_model_id
+
+
 def create_sequential_thinking_team() -> Team:
     """
     Creates and configures the Agno multi-agent team for sequential thinking,
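The new `get_model_config()` resolves everything from environment variables: provider from `LLM_PROVIDER`, then per-provider `*_TEAM_MODEL_ID` / `*_AGENT_MODEL_ID` overrides, then hard-coded defaults. Below is a condensed, dependency-free sketch of that same resolution order; `resolve_model_ids` is an illustrative helper for this writeup, not part of the package:

```python
import os

# provider -> (env-var prefix, default model id), mirroring get_model_config()
_DEFAULTS = {
    "deepseek":   ("DEEPSEEK",   "deepseek-chat"),
    "groq":       ("GROQ",       "deepseek-r1-distill-llama-70b"),
    "openrouter": ("OPENROUTER", "deepseek/deepseek-chat-v3-0324"),
}

def resolve_model_ids() -> tuple[str, str, str]:
    provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
    # Unknown providers fall back to DeepSeek (the real function also logs an
    # error and hard-codes "deepseek-chat" in that branch).
    prefix, default = _DEFAULTS.get(provider, _DEFAULTS["deepseek"])
    team_id = os.environ.get(f"{prefix}_TEAM_MODEL_ID", default)
    agent_id = os.environ.get(f"{prefix}_AGENT_MODEL_ID", default)
    return provider, team_id, agent_id

os.environ["LLM_PROVIDER"] = "groq"
os.environ["GROQ_TEAM_MODEL_ID"] = "llama3-70b-8192"
print(resolve_model_ids())
# -> ('groq', 'llama3-70b-8192', 'deepseek-r1-distill-llama-70b')
```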
@@ -303,12 +338,13 @@ def create_sequential_thinking_team() -> Team:
         An initialized Team instance.
     """
     try:
-
-
-
+        ModelClass, team_model_id, agent_model_id = get_model_config()
+        team_model_instance = ModelClass(id=team_model_id)
+        agent_model_instance = ModelClass(id=agent_model_id)
+
     except Exception as e:
-        logger.error(f"Error initializing
-        logger.error("Please ensure the necessary API keys and configurations are set.")
+        logger.error(f"Error initializing models: {e}")
+        logger.error("Please ensure the necessary API keys and configurations are set for the selected provider ({os.environ.get('LLM_PROVIDER', 'deepseek')}).")
         sys.exit(1)
 
     # REMOVED the separate Coordinator Agent definition.
@@ -334,7 +370,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on fulfilling the delegated planning sub-task accurately and efficiently.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -358,7 +394,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on accuracy and relevance for the delegated research request.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -382,7 +418,7 @@
             " 7. Return your response to the Team Coordinator.",
             "Focus on depth and clarity for the delegated analytical task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -407,7 +443,7 @@
             " 8. Return your response to the Team Coordinator.",
             "Focus on rigorous and constructive critique for the delegated evaluation task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -430,7 +466,7 @@
             " 6. Return your response to the Team Coordinator.",
             "Focus on creating clarity and coherence for the delegated synthesis task.",
         ],
-        model=
+        model=agent_model_instance, # Use the designated agent model
         add_datetime_to_instructions=True,
         markdown=True
     )
@@ -441,7 +477,7 @@
         name="SequentialThinkingTeam",
         mode="coordinate",
         members=[planner, researcher, analyzer, critic, synthesizer], # ONLY specialist agents
-        model=
+        model=team_model_instance, # Model for the Team's coordination logic
         description="You are the Coordinator of a specialist team processing sequential thoughts. Your role is to manage the flow, delegate tasks, and synthesize results.",
         instructions=[
             f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
@@ -515,9 +551,17 @@ async def app_lifespan() -> AsyncIterator[None]:
     """Manages the application lifecycle."""
     global app_context
     logger.info("Initializing application resources (Coordinate Mode)...")
-
-
-
+    try:
+        team = create_sequential_thinking_team()
+        app_context = AppContext(team=team)
+        provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+        logger.info(f"Agno team initialized in coordinate mode using provider: {provider}.")
+    except Exception as e:
+        logger.critical(f"Failed to initialize Agno team during lifespan setup: {e}", exc_info=True)
+        # Decide how to handle this - re-raise, exit, or continue without a team?
+        # For now, re-raise to prevent server starting in a broken state.
+        raise e
+
     try:
         yield
     finally:
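The reworked `app_lifespan` follows the standard `asynccontextmanager` shape: initialize before `yield`, re-raise on failure so the server never starts half-configured, clean up in `finally`. A minimal self-contained sketch of that pattern, with a stand-in `init()` in place of `create_sequential_thinking_team()`:

```python
import asyncio
from contextlib import asynccontextmanager
from typing import AsyncIterator

def init() -> str:
    # Stand-in for create_sequential_thinking_team(); may raise
    return "team"

@asynccontextmanager
async def lifespan() -> AsyncIterator[str]:
    try:
        resource = init()
    except Exception:
        raise  # fail fast: never yield a broken context (the real code logs first)
    try:
        yield resource
    finally:
        resource = None  # cleanup runs even if the body raised

async def main() -> None:
    async with lifespan() as team:
        print(f"serving with {team!r}")  # server would run here

asyncio.run(main())
```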
@@ -598,7 +642,20 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
     global app_context
     if not app_context or not app_context.team:
         logger.error("Application context or Agno team not initialized during tool call.")
-
+        # Attempt re-initialization cautiously, or fail hard.
+        # Let's try re-initialization if app_lifespan wasn't used or failed silently.
+        logger.warning("Attempting to re-initialize team due to missing context...")
+        try:
+            team = create_sequential_thinking_team()
+            app_context = AppContext(team=team) # Re-create context
+            logger.info("Successfully re-initialized team and context.")
+        except Exception as init_err:
+            logger.critical(f"Failed to re-initialize Agno team during tool call: {init_err}", exc_info=True)
+            return json.dumps({
+                "error": "Critical Error: Application context not available and re-initialization failed.",
+                "status": "critical_failure"
+            }, indent=2)
+        # Or raise Exception("Critical Error: Application context not available.")
 
     MIN_TOTAL_THOUGHTS = 5 # Keep a minimum suggestion
 
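The tool handler now degrades gracefully instead of assuming the lifespan ran: if the module-level context is missing, it rebuilds the team, and only if that also fails does it return a structured JSON error the MCP client can inspect. A stripped-down sketch of the same guard, with a hypothetical `build_team()` standing in for `create_sequential_thinking_team()`:

```python
import json
from dataclasses import dataclass
from typing import Optional

@dataclass
class AppContext:
    team: object

app_context: Optional[AppContext] = None  # module-level context, as in main.py

def build_team() -> object:
    # Hypothetical stand-in for create_sequential_thinking_team(); may raise
    return object()

def ensure_context() -> Optional[str]:
    """Return a JSON error payload if the context cannot be (re)built, else None."""
    global app_context
    if app_context is None or app_context.team is None:
        try:
            app_context = AppContext(team=build_team())  # lazy re-initialization
        except Exception:
            return json.dumps({
                "error": "Critical Error: Application context not available and re-initialization failed.",
                "status": "critical_failure",
            }, indent=2)
    return None

print(ensure_context())  # None -> context is ready
```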
@@ -731,7 +788,9 @@
 
 def run():
     """Initializes and runs the MCP server in coordinate mode."""
-
+    selected_provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    logger.info(f"Using provider: {selected_provider}")
+    logger.info(f"Initializing Sequential Thinking Server (Coordinate Mode) with Provider: {selected_provider}...")
 
     global app_context
     # Initialize application resources using the lifespan manager implicitly if running via framework
@@ -742,7 +801,7 @@ def run():
     try:
         team = create_sequential_thinking_team()
         app_context = AppContext(team=team)
-        logger.info("Agno team initialized directly in coordinate mode.")
+        logger.info(f"Agno team initialized directly in coordinate mode using provider: {selected_provider}.")
     except Exception as e:
         logger.critical(f"Failed to initialize Agno team: {e}", exc_info=True)
         sys.exit(1)
@@ -759,27 +818,37 @@ def run():
         logger.info("Shutting down application resources...")
         app_context = None # Clean up context if initialized directly
 
-
-
-
-
-
-
+def check_environment_variables():
+    """Checks for necessary environment variables based on the selected provider."""
+    provider = os.environ.get("LLM_PROVIDER", "deepseek").lower()
+    api_key_var = ""
+    base_url_var = "" # Some providers might not strictly need a base URL override
+
+    if provider == "deepseek":
+        api_key_var = "DEEPSEEK_API_KEY"
+    elif provider == "groq":
+        api_key_var = "GROQ_API_KEY"
+    elif provider == "openrouter":
+        api_key_var = "OPENROUTER_API_KEY"
+    if api_key_var and api_key_var not in os.environ:
+        logger.warning(f"{api_key_var} environment variable not found. Model initialization for '{provider}' might fail.")
     try:
-
-
-
-        for
-            if hasattr(member, 'tools') and member.tools:
-                if any(isinstance(t, ExaTools) for t in member.tools):
-                    uses_exa = True
-                    break # Found it, no need to check further
+        ModelClass, _, _ = get_model_config() # Just need the class for dummy init
+        dummy_model = ModelClass(id="dummy-check") # Use a placeholder ID
+        researcher_for_check = Agent(name="CheckAgent", tools=[ExaTools()], model=dummy_model)
+        uses_exa = any(isinstance(t, ExaTools) for t in researcher_for_check.tools)
 
         if uses_exa and "EXA_API_KEY" not in os.environ:
             logger.warning("EXA_API_KEY environment variable not found, but ExaTools are configured in a team member. Researcher agent might fail.")
+    except Exception as e:
+        logger.error(f"Could not perform ExaTools check due to an error: {e}")
 
-
+
+if __name__ == "__main__":
+    check_environment_variables()
+    try:
         run()
     except Exception as e:
-        logger.critical(f"Failed during
+        logger.critical(f"Failed during server run: {e}", exc_info=True)
         sys.exit(1)
+
{mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/METADATA
CHANGED
@@ -1,12 +1,13 @@
 Metadata-Version: 2.4
 Name: mcp-server-mas-sequential-thinking
-Version: 0.1.3
+Version: 0.2.0
 Summary: MCP Agent Implementation for Sequential Thinking
 Author-email: Frad LEE <fradser@gmail.com>
 Requires-Python: >=3.10
 Requires-Dist: agno
 Requires-Dist: asyncio
 Requires-Dist: exa-py
+Requires-Dist: groq
 Requires-Dist: mcp
 Requires-Dist: python-dotenv
 Provides-Extra: dev
@@ -80,8 +81,11 @@ This parallel processing leads to substantially higher token usage (potentially
 ## Prerequisites
 
 * Python 3.10+
-* Access to a compatible LLM API (configured for `agno
-*
+* Access to a compatible LLM API (configured for `agno`). The system now supports:
+    * **Groq:** Requires `GROQ_API_KEY`.
+    * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
+    * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
+* Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
 * Exa API Key (if using the Researcher agent's capabilities)
     * `EXA_API_KEY` environment variable.
 * `uv` package manager (recommended) or `pip`.
@@ -90,7 +94,9 @@ This parallel processing leads to substantially higher token usage (potentially
 
 This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
 
-
+The `env` section should include the API key for your chosen `LLM_PROVIDER`.
+
+```json
 {
   "mcpServers": {
     "mas-sequential-thinking": {
@@ -98,14 +104,17 @@ This server runs as a standard executable script that communicates via stdio, as
       "args": [
         "mcp-server-mas-sequential-thinking"
       ],
-      env": {
-        "
-        "
-        "
+      "env": {
+        "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
+        // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
+        "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
+        // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
+        "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
+        "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
       }
     }
   }
-}
+}
 ```
 
 ## Installation & Setup
@@ -119,10 +128,31 @@ This server runs as a standard executable script that communicates via stdio, as
 2. **Set Environment Variables:**
     Create a `.env` file in the root directory or export the variables:
     ```dotenv
-    #
-
-
+    # --- LLM Configuration ---
+    # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
+    LLM_PROVIDER="deepseek"
 
+    # Provide the API key for the chosen provider:
+    # GROQ_API_KEY="your_groq_api_key"
+    DEEPSEEK_API_KEY="your_deepseek_api_key"
+    # OPENROUTER_API_KEY="your_openrouter_api_key"
+
+    # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
+    DEEPSEEK_BASE_URL="your_base_url_if_needed"
+
+    # Optional: Specify different models for Team Coordinator and Specialist Agents
+    # Defaults are set within the code based on the provider if these are not set.
+    # Example for Groq:
+    # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
+    # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
+    # Example for DeepSeek:
+    # DEEPSEEK_TEAM_MODEL_ID="deepseek-chat"
+    # DEEPSEEK_AGENT_MODEL_ID="deepseek-coder"
+    # Example for OpenRouter:
+    # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
+    # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
+
+    # --- External Tools ---
     # Required ONLY if the Researcher agent is used and needs Exa
     EXA_API_KEY="your_exa_api_key"
     ```
mcp_server_mas_sequential_thinking-0.2.0.dist-info/RECORD
ADDED
@@ -0,0 +1,5 @@
+main.py,sha256=YQJSQmXFxCHqvBxIYwPWPZHAnjkSIB_xrs3zBQqaqL8,44014
+mcp_server_mas_sequential_thinking-0.2.0.dist-info/METADATA,sha256=HESwtzCeJim3YMfvJiWKxieIo90F1-0zRQH7Nc2QlQY,14812
+mcp_server_mas_sequential_thinking-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mcp_server_mas_sequential_thinking-0.2.0.dist-info/entry_points.txt,sha256=wY2jq_6PmuqyKQzNnL6famc7DXnQiEhVnq3umzNVNiE,64
+mcp_server_mas_sequential_thinking-0.2.0.dist-info/RECORD,,
mcp_server_mas_sequential_thinking-0.1.3.dist-info/RECORD
DELETED
@@ -1,5 +0,0 @@
-main.py,sha256=mDC2ayNFA5ON0v4PMhpWijIHxlk_5h7ak8x6shcqORQ,40049
-mcp_server_mas_sequential_thinking-0.1.3.dist-info/METADATA,sha256=XHTnbZ9kt63Lc5g_tgy6wCbgIup0Owy0kwwHffT7yNs,13349
-mcp_server_mas_sequential_thinking-0.1.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mcp_server_mas_sequential_thinking-0.1.3.dist-info/entry_points.txt,sha256=wY2jq_6PmuqyKQzNnL6famc7DXnQiEhVnq3umzNVNiE,64
-mcp_server_mas_sequential_thinking-0.1.3.dist-info/RECORD,,
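Each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing `=` padding stripped (per the wheel spec, PEP 376/427). A small stdlib-only sketch that reproduces a row, e.g. to confirm the `main.py` hash above against a locally extracted wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_row(path: str) -> str:
    """Build a wheel RECORD row: urlsafe-base64 SHA-256 without '=' padding, then file size."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Run inside an unpacked 0.2.0 wheel; should print the RECORD's first line:
# main.py,sha256=YQJSQmXFxCHqvBxIYwPWPZHAnjkSIB_xrs3zBQqaqL8,44014
print(record_row("main.py"))
```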
{mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/WHEEL
{mcp_server_mas_sequential_thinking-0.1.3.dist-info → mcp_server_mas_sequential_thinking-0.2.0.dist-info}/entry_points.txt
Files without changes