mcp-server-mas-sequential-thinking 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
main.py
ADDED
@@ -0,0 +1,785 @@
|
|
1
|
+
import json
|
2
|
+
import os
|
3
|
+
import sys
|
4
|
+
from contextlib import asynccontextmanager
|
5
|
+
from dataclasses import dataclass, field
|
6
|
+
from datetime import datetime
|
7
|
+
from typing import Any, AsyncIterator, Dict, List, Optional
|
8
|
+
|
9
|
+
from mcp.server.fastmcp import FastMCP
|
10
|
+
from agno.agent import Agent
|
11
|
+
from agno.models.deepseek import DeepSeek
|
12
|
+
from agno.team.team import Team
|
13
|
+
from agno.tools.exa import ExaTools
|
14
|
+
from agno.tools.thinking import ThinkingTools
|
15
|
+
from dotenv import load_dotenv
|
16
|
+
from pydantic import (BaseModel, ConfigDict, Field, ValidationError,
|
17
|
+
field_validator, model_validator)
|
18
|
+
|
19
|
+
# Add logging imports and setup
|
20
|
+
import logging
|
21
|
+
import logging.handlers
|
22
|
+
from pathlib import Path
|
23
|
+
|
24
|
+
# Configure logging system
|
25
|
+
def setup_logging() -> logging.Logger:
    """
    Set up application logging with both file and console handlers.

    Logs are stored in the user's home directory under
    ``.sequential_thinking/logs`` with rotation (10MB per file, 5 backups).
    Safe to call more than once: subsequent calls return the already
    configured logger instead of attaching duplicate handlers.

    Returns:
        Logger instance configured with both handlers.
    """
    logger = logging.getLogger("sequential_thinking")

    # Guard against duplicate handlers when called repeatedly (e.g. on
    # module re-import) — without this, every log line is emitted N times.
    if logger.handlers:
        return logger

    logger.setLevel(logging.DEBUG)

    # Create logs directory in user's home
    log_dir = Path.home() / ".sequential_thinking" / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)

    # Shared log format for both handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - [%(name)s] - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    # File handler with rotation: captures everything (DEBUG and up)
    file_handler = logging.handlers.RotatingFileHandler(
        log_dir / "sequential_thinking.log",
        maxBytes=10 * 1024 * 1024,  # 10MB
        backupCount=5,
        encoding='utf-8'
    )
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    # Console handler on stderr, limited to INFO and up
    console_handler = logging.StreamHandler(sys.stderr)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)

    return logger
|
68
|
+
|
69
|
+
# Initialize logger
|
70
|
+
logger = setup_logging()
|
71
|
+
|
72
|
+
# Load environment variables from .env file
|
73
|
+
load_dotenv()
|
74
|
+
|
75
|
+
# --- Pydantic Model for Tool Input Schema ---
|
76
|
+
|
77
|
+
class ThoughtData(BaseModel):
    """
    Represents the data structure for a single thought in the sequential
    thinking process. This model is used as the input schema for the
    'sequentialthinking' tool.

    Field names are deliberately camelCase to match the tool's wire schema.
    Instances are frozen (immutable) after validation; see ``model_config``.
    """
    # Free-form content of the step; must be non-empty (min_length=1).
    thought: str = Field(
        ...,
        description="The content of the current thought or step. Make it specific enough to imply the desired action (e.g., 'Analyze X', 'Critique Y', 'Plan Z', 'Research A').",
        min_length=1
    )
    # 1-based position of this thought in the sequence.
    thoughtNumber: int = Field(
        ...,
        description="The sequence number of this thought.",
        ge=1
    )
    # Estimated length of the whole sequence; may be exceeded (see
    # validate_thought_numbers, which intentionally does NOT enforce
    # thoughtNumber <= totalThoughts).
    totalThoughts: int = Field(
        ...,
        description="The estimated total thoughts required.",
        ge=1
    )
    nextThoughtNeeded: bool = Field(
        ...,
        description="Indicates if another thought step is needed after this one."
    )
    isRevision: bool = Field(
        False,
        description="Indicates if this thought revises a previous thought."
    )
    # Only meaningful when isRevision is True (enforced by validator below).
    revisesThought: Optional[int] = Field(
        None,
        description="The number of the thought being revised, if isRevision is True.",
        ge=1
    )
    # branchFromThought/branchId together mark this thought as a branch.
    branchFromThought: Optional[int] = Field(
        None,
        description="The thought number from which this thought branches.",
        ge=1
    )
    branchId: Optional[str] = Field(
        None,
        description="An identifier for the branch, if branching."
    )
    needsMoreThoughts: bool = Field(
        False,
        description="Indicates if more thoughts are needed beyond the current estimate."
    )

    # Pydantic model configuration
    model_config = ConfigDict(
        validate_assignment=True,
        extra="forbid",
        frozen=True, # Consider making it mutable if logic needs modification within tool
        arbitrary_types_allowed=True,
        json_schema_extra={
            "examples": [
                {
                    "thought": "Analyze the core assumptions of the previous step.",
                    "thoughtNumber": 2,
                    "totalThoughts": 5,
                    "nextThoughtNeeded": True,
                    "isRevision": False,
                    "revisesThought": None,
                    "branchFromThought": None,
                    "branchId": None,
                    "needsMoreThoughts": False
                },
                {
                    "thought": "Critique the proposed solution for potential biases.",
                    "thoughtNumber": 4,
                    "totalThoughts": 5,
                    "nextThoughtNeeded": True,
                    "isRevision": False,
                    "revisesThought": None,
                    "branchFromThought": None,
                    "branchId": None,
                    "needsMoreThoughts": False
                }
            ]
        }
    )

    # --- Validators ---

    # NOTE(review): the second parameter is annotated Dict[str, Any] but is
    # accessed via `.data`, which matches pydantic v2's ValidationInfo object
    # rather than a plain dict — the annotation looks stale; confirm against
    # the pydantic version in use.
    @field_validator('revisesThought')
    @classmethod
    def validate_revises_thought(cls, v: Optional[int], values: Dict[str, Any]) -> Optional[int]:
        # Declaration order matters: isRevision and thoughtNumber appear
        # before revisesThought, so they are already in values.data here.
        is_revision = values.data.get('isRevision', False)
        if v is not None and not is_revision:
            raise ValueError('revisesThought can only be set when isRevision is True')
        if v is not None and 'thoughtNumber' in values.data and v >= values.data['thoughtNumber']:
            raise ValueError('revisesThought must be less than thoughtNumber')
        return v

    @field_validator('branchId')
    @classmethod
    def validate_branch_id(cls, v: Optional[str], values: Dict[str, Any]) -> Optional[str]:
        # branchFromThought is declared before branchId, so it is available
        # in values.data when this validator runs.
        branch_from_thought = values.data.get('branchFromThought')
        if v is not None and branch_from_thought is None:
            raise ValueError('branchId can only be set when branchFromThought is set')
        return v

    @model_validator(mode='after')
    def validate_thought_numbers(self) -> 'ThoughtData':
        # Allow thoughtNumber > totalThoughts for dynamic adjustment downstream
        # revisesThought validation moved to field_validator for better context access
        if self.branchFromThought is not None and self.branchFromThought >= self.thoughtNumber:
            raise ValueError('branchFromThought must be less than thoughtNumber')
        return self

    def dict(self) -> Dict[str, Any]:
        """Convert thought data to dictionary format for serialization.

        Overrides BaseModel.dict(); None-valued optional fields are dropped.
        """
        # Use Pydantic's built-in method, handling potential None values if needed
        return self.model_dump(exclude_none=True)
|
191
|
+
|
192
|
+
|
193
|
+
# --- Utility for Formatting Thoughts (for Logging) ---
|
194
|
+
|
195
|
+
def format_thought_for_log(thought_data: "ThoughtData") -> str:
    """Formats a thought for logging purposes, handling multi-byte characters.

    Renders the thought inside a Unicode box frame. The header shows the
    thought number/total plus revision or branch context; branch thoughts
    also get an extra detail line inside the box.

    Args:
        thought_data: The thought payload to render. Only its public
            attributes are read, so any object with the same fields works.

    Returns:
        A multi-line string (starting with a newline) containing the framed
        thought, suitable for logging.
    """
    branch_info_log = ''  # Extra in-box line, used only for branch thoughts

    if thought_data.isRevision and thought_data.revisesThought is not None:
        prefix = '🔄 Revision'
        context = f' (revising thought {thought_data.revisesThought})'
    elif thought_data.branchFromThought is not None and thought_data.branchId is not None:
        prefix = '🌿 Branch'
        context = f' (from thought {thought_data.branchFromThought}, ID: {thought_data.branchId})'
        branch_info_log = f"Branch Details: ID='{thought_data.branchId}', originates from Thought #{thought_data.branchFromThought}"
    else:
        prefix = '💭 Thought'
        context = ''

    header = f"{prefix} {thought_data.thoughtNumber}/{thought_data.totalThoughts}{context}"

    def get_visual_width(s: str) -> int:
        """Approximate terminal cell width: wide (CJK/emoji) chars count as 2."""
        width = 0
        for char in s:
            cp = ord(char)
            if (0x1100 <= cp <= 0x115F or
                    0x2329 <= cp <= 0x232A or
                    0x2E80 <= cp <= 0x3247 or
                    0x3250 <= cp <= 0x4DBF or
                    0x4E00 <= cp <= 0xA4C6 or
                    0xA960 <= cp <= 0xA97C or
                    0xAC00 <= cp <= 0xD7A3 or
                    0xF900 <= cp <= 0xFAFF or
                    0xFE10 <= cp <= 0xFE19 or
                    0xFE30 <= cp <= 0xFE6F or
                    0xFF00 <= cp <= 0xFF60 or
                    0xFFE0 <= cp <= 0xFFE6 or
                    0x1B000 <= cp <= 0x1B001 or
                    0x1F200 <= cp <= 0x1F251 or
                    0x1F300 <= cp <= 0x1F64F or
                    0x1F680 <= cp <= 0x1F6FF):
                width += 2
            else:
                width += 1
        return width

    header_width = get_visual_width(header)
    thought_width = get_visual_width(thought_data.thought)
    # BUGFIX: the branch-detail line now participates in the width
    # computation; previously it could be wider than the box, producing a
    # negative (i.e. empty) padding and a misaligned frame.
    info_width = get_visual_width(branch_info_log) if branch_info_log else 0
    max_inner_width = max(header_width, thought_width, info_width)
    border = '─' * (max_inner_width + 2)  # '│ ' + content + ' │' adds 2 cells

    # Word-wrap the thought text by visual width.
    thought_lines = []
    current_line = ""
    current_width = 0
    for word in thought_data.thought.split():
        word_width = get_visual_width(word)
        space_width = 1 if current_line else 0
        if current_width + space_width + word_width <= max_inner_width:
            current_line += (" " if current_line else "") + word
            current_width += space_width + word_width
        else:
            thought_lines.append(current_line)
            current_line = word
            current_width = word_width
    # BUGFIX: flush the final line after the loop; the old in-loop
    # last-index check left `i` unbound for empty input and was fragile.
    if current_line:
        thought_lines.append(current_line)

    def pad(line: str) -> str:
        """Pad a content line to the inner width and frame it with '│'."""
        return f"│ {line}{' ' * (max_inner_width - get_visual_width(line))} │"

    box = [f"┌{border}┐", pad(header), f"├{border}┤"]
    # BUGFIX: wrapped lines are joined with newlines below; they were
    # previously concatenated with ''.join onto a single output line.
    box.extend(pad(line) for line in thought_lines)
    if branch_info_log:
        # BUGFIX: this line used to drag a literal source comment into the
        # rendered output (it sat inside the returned f-string).
        box.append(pad(branch_info_log))
        box.append(f"├{border}┤")
    box.append(f"└{border}┘")
    return "\n" + "\n".join(box)
|
293
|
+
|
294
|
+
|
295
|
+
# --- Agno Multi-Agent Team Setup ---
|
296
|
+
|
297
|
+
def create_sequential_thinking_team() -> Team:
    """
    Creates and configures the Agno multi-agent team for sequential thinking,
    using 'coordinate' mode. The Team object itself acts as the coordinator.

    Returns:
        An initialized Team instance.

    Note:
        Exits the process via sys.exit(1) if the base model cannot be
        constructed (e.g. missing API keys/configuration).
    """
    try:
        # Use a capable model for the team coordinator logic and specialists
        # The Team itself needs a model for its coordination instructions.
        team_model = DeepSeek(id="deepseek-chat")
    except Exception as e:
        logger.error(f"Error initializing base model: {e}")
        logger.error("Please ensure the necessary API keys and configurations are set.")
        sys.exit(1)

    # REMOVED the separate Coordinator Agent definition.
    # The Team object below will handle coordination using its own instructions/model.

    # Agent definitions for specialists.
    # NOTE: each instruction list embeds datetime.now() evaluated once, here at
    # team-creation time — not refreshed per request.
    planner = Agent(
        name="Planner",
        role="Strategic Planner",
        description="Develops strategic plans and roadmaps based on delegated sub-tasks.",
        tools=[ThinkingTools()],
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Strategic Planner specialist.",
            "You will receive specific sub-tasks from the Team Coordinator related to planning, strategy, or process design.",
            "**When you receive a sub-task:**",
            " 1. Understand the specific planning requirement delegated to you.",
            " 2. Use the `think` tool as a scratchpad if needed to outline your steps or potential non-linear points relevant *to your sub-task*.",
            " 3. Develop the requested plan, roadmap, or sequence of steps.",
            " 4. Identify any potential revision/branching points *specifically related to your plan* and note them.",
            " 5. Consider constraints or potential roadblocks relevant to your assigned task.",
            " 6. Formulate a clear and concise response containing the requested planning output.",
            " 7. Return your response to the Team Coordinator.",
            "Focus on fulfilling the delegated planning sub-task accurately and efficiently.",
        ],
        model=team_model, # Specialists can use the same model or different ones
        add_datetime_to_instructions=True,
        markdown=True
    )

    # Researcher is the only specialist with external search tools (Exa).
    researcher = Agent(
        name="Researcher",
        role="Information Gatherer",
        description="Gathers and validates information based on delegated research sub-tasks.",
        tools=[ThinkingTools(), ExaTools()],
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Information Gatherer specialist.",
            "You will receive specific sub-tasks from the Team Coordinator requiring information gathering or verification.",
            "**When you receive a sub-task:**",
            " 1. Identify the specific information requested in the delegated task.",
            " 2. Use your tools (like Exa) to find relevant facts, data, or context. Use the `think` tool to plan queries or structure findings if needed.",
            " 3. Validate information where possible.",
            " 4. Structure your findings clearly.",
            " 5. Note any significant information gaps encountered during your research for the specific sub-task.",
            " 6. Formulate a response containing the research findings relevant to the sub-task.",
            " 7. Return your response to the Team Coordinator.",
            "Focus on accuracy and relevance for the delegated research request.",
        ],
        model=team_model,
        add_datetime_to_instructions=True,
        markdown=True
    )

    analyzer = Agent(
        name="Analyzer",
        role="Core Analyst",
        description="Performs analysis based on delegated analytical sub-tasks.",
        tools=[ThinkingTools()],
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Core Analyst specialist.",
            "You will receive specific sub-tasks from the Team Coordinator requiring analysis, pattern identification, or logical evaluation.",
            "**When you receive a sub-task:**",
            " 1. Understand the specific analytical requirement of the delegated task.",
            " 2. Use the `think` tool as a scratchpad if needed to outline your analysis framework or draft insights related *to your sub-task*.",
            " 3. Perform the requested analysis (e.g., break down components, identify patterns, evaluate logic).",
            " 4. Generate concise insights based on your analysis of the sub-task.",
            " 5. Based on your analysis, identify any significant logical inconsistencies or invalidated premises *within the scope of your sub-task* that you should highlight in your response.",
            " 6. Formulate a response containing your analytical findings and insights.",
            " 7. Return your response to the Team Coordinator.",
            "Focus on depth and clarity for the delegated analytical task.",
        ],
        model=team_model,
        add_datetime_to_instructions=True,
        markdown=True
    )

    critic = Agent(
        name="Critic",
        role="Quality Controller",
        description="Critically evaluates ideas or assumptions based on delegated critique sub-tasks.",
        tools=[ThinkingTools()],
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Quality Controller specialist.",
            "You will receive specific sub-tasks from the Team Coordinator requiring critique, evaluation of assumptions, or identification of flaws.",
            "**When you receive a sub-task:**",
            " 1. Understand the specific aspect requiring critique in the delegated task.",
            " 2. Use the `think` tool as a scratchpad if needed to list assumptions or potential weaknesses related *to your sub-task*.",
            " 3. Critically evaluate the provided information or premise as requested.",
            " 4. Identify potential biases, flaws, or logical fallacies within the scope of the sub-task.",
            " 5. Suggest specific improvements or point out weaknesses constructively.",
            " 6. If your critique reveals significant flaws or outdated assumptions *within the scope of your sub-task*, highlight this clearly in your response.",
            " 7. Formulate a response containing your critical evaluation and recommendations.",
            " 8. Return your response to the Team Coordinator.",
            "Focus on rigorous and constructive critique for the delegated evaluation task.",
        ],
        model=team_model,
        add_datetime_to_instructions=True,
        markdown=True
    )

    synthesizer = Agent(
        name="Synthesizer",
        role="Integration Specialist",
        description="Integrates information or forms conclusions based on delegated synthesis sub-tasks.",
        tools=[ThinkingTools()],
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Integration Specialist.",
            "You will receive specific sub-tasks from the Team Coordinator requiring integration of information, synthesis of ideas, or formation of conclusions.",
            "**When you receive a sub-task:**",
            " 1. Understand the specific elements needing integration or synthesis in the delegated task.",
            " 2. Use the `think` tool as a scratchpad if needed to outline connections or draft conclusions related *to your sub-task*.",
            " 3. Connect the provided elements, identify overarching themes, or draw conclusions as requested.",
            " 4. Distill complex inputs into clear, structured insights for the sub-task.",
            " 5. Formulate a response presenting the synthesized information or conclusions.",
            " 6. Return your response to the Team Coordinator.",
            "Focus on creating clarity and coherence for the delegated synthesis task.",
        ],
        model=team_model,
        add_datetime_to_instructions=True,
        markdown=True
    )

    # Create the team with coordinate mode.
    # The Team object itself acts as the coordinator, using the instructions/description/model provided here.
    team = Team(
        name="SequentialThinkingTeam",
        mode="coordinate",
        members=[planner, researcher, analyzer, critic, synthesizer], # ONLY specialist agents
        model=team_model, # Model for the Team's coordination logic
        description="You are the Coordinator of a specialist team processing sequential thoughts. Your role is to manage the flow, delegate tasks, and synthesize results.",
        instructions=[
            f"Current date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            "You are the Coordinator managing a team of specialists (Planner, Researcher, Analyzer, Critic, Synthesizer) in 'coordinate' mode.",
            "Your core responsibilities when receiving an input thought:",
            " 1. Analyze the input thought, considering its type (e.g., initial planning, analysis, revision, branch).",
            " 2. Break the thought down into specific, actionable sub-tasks suitable for your specialist team members.",
            " 3. Determine the MINIMUM set of specialists required to address the thought comprehensively.",
            " 4. Delegate the appropriate sub-task(s) ONLY to the essential specialists identified. Provide clear instructions and necessary context (like previous thought content if revising/branching) for each sub-task.",
            " 5. Await responses from the delegated specialist(s).",
            " 6. Synthesize the responses from the specialist(s) into a single, cohesive, and comprehensive response addressing the original input thought.",
            " 7. Based on the synthesis and specialist feedback, identify potential needs for revision of previous thoughts or branching to explore alternatives.",
            " 8. Include clear recommendations in your final synthesized response if revision or branching is needed. Use formats like 'RECOMMENDATION: Revise thought #X...' or 'SUGGESTION: Consider branching from thought #Y...'.",
            " 9. Ensure the final synthesized response directly addresses the initial input thought and provides necessary guidance for the next step in the sequence.",
            "Delegation Criteria:",
            " - Choose specialists based on the primary actions implied by the thought (planning, research, analysis, critique, synthesis).",
            " - **Prioritize Efficiency:** Delegate sub-tasks only to specialists whose expertise is *strictly necessary*. Aim to minimize concurrent delegations.",
            " - Provide context: Include relevant parts of the input thought or previous context when delegating.",
            "Synthesis:",
            " - Integrate specialist responses logically.",
            " - Resolve conflicts or highlight discrepancies.",
            " - Formulate a final answer representing the combined effort.",
            "Remember: Orchestrate the team effectively and efficiently."
        ],
        success_criteria=[
            "Break down input thoughts into appropriate sub-tasks",
            "Delegate sub-tasks efficiently to the most relevant specialists",
            "Specialists execute delegated sub-tasks accurately",
            "Synthesize specialist responses into a cohesive final output addressing the original thought",
            "Identify and recommend necessary revisions or branches based on analysis"
        ],
        enable_agentic_context=False, # NOTE: agentic context sharing is currently DISABLED
        share_member_interactions=False, # NOTE: member interaction sharing is currently DISABLED
        markdown=True,
        debug_mode=False
    )

    return team
|
483
|
+
|
484
|
+
# --- Application Context and Lifespan Management ---
|
485
|
+
|
486
|
+
@dataclass
class AppContext:
    """Holds shared application resources, like the Agno team.

    Also tracks the full thought history and a per-branch index of
    branching thoughts.
    """
    team: Team
    thought_history: List[ThoughtData] = field(default_factory=list)
    branches: Dict[str, List[ThoughtData]] = field(default_factory=dict)

    def add_thought(self, thought: ThoughtData) -> None:
        """Record a thought in the history and, if it branches, in its branch list."""
        self.thought_history.append(thought)

        # A thought belongs to a branch only when both branch fields are set.
        if thought.branchFromThought is not None and thought.branchId is not None:
            self.branches.setdefault(thought.branchId, []).append(thought)

    def get_branch_thoughts(self, branch_id: str) -> List[ThoughtData]:
        """Return every thought recorded under *branch_id* (empty list if unknown)."""
        return self.branches.get(branch_id, [])

    def get_all_branches(self) -> Dict[str, int]:
        """Map each known branch ID to the number of thoughts it contains."""
        counts: Dict[str, int] = {}
        for branch_id, branch_thoughts in self.branches.items():
            counts[branch_id] = len(branch_thoughts)
        return counts
|
510
|
+
|
511
|
+
app_context: Optional[AppContext] = None
|
512
|
+
|
513
|
+
@asynccontextmanager
async def app_lifespan() -> AsyncIterator[None]:
    """Manages the application lifecycle.

    On entry: builds the Agno team and publishes it via the module-level
    ``app_context``. On exit: clears ``app_context`` again.
    """
    global app_context
    logger.info("Initializing application resources (Coordinate Mode)...")
    app_context = AppContext(team=create_sequential_thinking_team())
    logger.info("Agno team initialized in coordinate mode.")
    try:
        yield
    finally:
        # Always tear down the shared context, even if the body raised.
        logger.info("Shutting down application resources...")
        app_context = None
|
526
|
+
|
527
|
+
# Initialize FastMCP
|
528
|
+
mcp = FastMCP()
|
529
|
+
|
530
|
+
# --- MCP Handlers ---
|
531
|
+
|
532
|
+
@mcp.prompt("sequential-thinking-starter")
def sequential_thinking_starter(problem: str, context: str = ""):
    """
    Starter prompt for sequential thinking that ENCOURAGES non-linear exploration
    using coordinate mode.

    Args:
        problem: The problem statement the thinking process should tackle.
        context: Optional extra background; included in the prompt only when
            non-empty.

    Returns:
        A dict with a 'description' and a 'messages' list containing a single
        user message wrapping the generated prompt text.
    """
    min_thoughts = 5 # Set a reasonable minimum number of initial thoughts

    # The braces below are f-string interpolations; the conditional one drops
    # the whole 'Context:' line when no context was supplied.
    prompt_text = f"""Initiate a comprehensive sequential thinking process for the following problem:

Problem: {problem}
{f'Context: {context}' if context else ''}

**Sequential Thinking Goals & Guidelines (Coordinate Mode):**

1. **Estimate Steps:** Analyze the problem complexity. Your initial `totalThoughts` estimate should be at least {min_thoughts}.
2. **First Thought:** Call the 'sequentialthinking' tool with `thoughtNumber: 1`, your estimated `totalThoughts` (at least {min_thoughts}), and `nextThoughtNeeded: True`. Structure your first thought as: "Plan a comprehensive analysis approach for: {problem}"
3. **Encouraged Revision:** Actively look for opportunities to revise previous thoughts if you identify flaws, oversights, or necessary refinements based on later analysis (especially from the Coordinator synthesizing Critic/Analyzer outputs). Use `isRevision: True` and `revisesThought: <thought_number>` when performing a revision. Robust thinking often involves self-correction. Look for 'RECOMMENDATION: Revise thought #X...' in the Coordinator's response.
4. **Encouraged Branching:** Explore alternative paths, perspectives, or solutions where appropriate. Use `branchFromThought: <thought_number>` and `branchId: <unique_branch_name>` to initiate branches. Exploring alternatives is key to thorough analysis. Consider suggestions for branching proposed by the Coordinator (e.g., 'SUGGESTION: Consider branching...').
5. **Extension:** If the analysis requires more steps than initially estimated, use `needsMoreThoughts: True` on the thought *before* you need the extension.
6. **Thought Content:** Each thought must:
    * Be detailed and specific to the current stage (planning, analysis, critique, synthesis, revision, branching).
    * Clearly explain the *reasoning* behind the thought, especially for revisions and branches.
    * Conclude by outlining what the *next* thought needs to address to fulfill the overall plan, considering the Coordinator's synthesis and suggestions.

**Process:**

* The `sequentialthinking` tool will track your progress. The Agno team operates in 'coordinate' mode. The Coordinator agent receives your thought, delegates sub-tasks to specialists (like Analyzer, Critic), and synthesizes their results, potentially including recommendations for revision or branching.
* Focus on insightful analysis, constructive critique (leading to potential revisions), and creative exploration (leading to potential branching).
* Actively reflect on the process. Linear thinking might be insufficient for complex problems. Proceed with the first thought."""

    return {
        "description": "Mandatory non-linear sequential thinking starter prompt (coordinate mode)",
        "messages": [{"role": "user", "content": {"type": "text", "text": prompt_text}}]
    }
|
567
|
+
|
568
|
+
# Removed process_agent_tasks function as it's not needed for coordinate mode.
|
569
|
+
# The Team's coordinator handles delegation internally.
|
570
|
+
|
571
|
+
@mcp.tool()
async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: int, nextThoughtNeeded: bool,
                             isRevision: bool = False, revisesThought: Optional[int] = None,
                             branchFromThought: Optional[int] = None, branchId: Optional[str] = None,
                             needsMoreThoughts: bool = False) -> str:
    """
    Processes one step in a sequential thinking chain using the Agno team in coordinate mode.

    The Coordinator agent within the team receives the thought, breaks it down,
    delegates to specialists (Planner, Researcher, Analyzer, Critic, Synthesizer),
    and synthesizes their outputs into a final response. The Coordinator's response
    may include suggestions for revision or branching.

    Parameters:
        thought (str): The current thinking step.
        thoughtNumber (int): Current sequence number (>=1)
        totalThoughts (int): Estimated total thoughts needed (>=5 suggested)
        nextThoughtNeeded (bool): Whether another thought step is needed
        isRevision (bool, optional): Whether this revises previous thinking
        revisesThought (int, optional): Which thought is being reconsidered
        branchFromThought (int, optional): If branching, which thought number is the branch point
        branchId (str, optional): Branch identifier
        needsMoreThoughts (bool, optional): If more thoughts are needed beyond current estimate

    Returns:
        str: JSON string containing the Coordinator's synthesized response and status.
            On error, the JSON carries an "error" key and a non-"success" status
            instead of raising (except for the missing-context case below).

    Raises:
        Exception: If the application context / Agno team was never initialized.
    """
    global app_context
    if not app_context or not app_context.team:
        logger.error("Application context or Agno team not initialized during tool call.")
        raise Exception("Critical Error: Application context not available.")

    MIN_TOTAL_THOUGHTS = 5  # Keep a minimum suggestion

    def _find_historical_thought(number: int, fallback: str) -> str:
        """Return the text of thought *number* from history (excluding the thought
        just recorded for this call), or *fallback* if it cannot be found."""
        for prior in app_context.thought_history[:-1]:
            if prior.thoughtNumber == number:
                return prior.thought
        return fallback

    try:
        # --- Initial Validation and Adjustments ---
        # Only warn when below the suggested minimum; let the LLM manage the estimate.
        if totalThoughts < MIN_TOTAL_THOUGHTS:
            logger.info(f"Initial totalThoughts ({totalThoughts}) is below suggested minimum {MIN_TOTAL_THOUGHTS}. Proceeding, but consider if more steps might be needed.")

        adjusted_next_thought_needed = nextThoughtNeeded
        if thoughtNumber >= totalThoughts and not needsMoreThoughts:
            # Reached the estimated end without an extension request: stop.
            adjusted_next_thought_needed = False

        # If extending, ensure totalThoughts increases and next is needed
        if needsMoreThoughts and thoughtNumber >= totalThoughts:
            totalThoughts = thoughtNumber + 2  # Extend by at least 2
            logger.info(f"Extended totalThoughts to {totalThoughts} due to needsMoreThoughts flag.")
            adjusted_next_thought_needed = True  # Ensure we continue

        # Create ThoughtData instance *after* initial adjustments so Pydantic
        # validates the values we will actually use.
        current_input_thought = ThoughtData(
            thought=thought,
            thoughtNumber=thoughtNumber,
            totalThoughts=totalThoughts,  # Use original or extended totalThoughts
            nextThoughtNeeded=adjusted_next_thought_needed,
            isRevision=isRevision,
            revisesThought=revisesThought,
            branchFromThought=branchFromThought,
            branchId=branchId,
            needsMoreThoughts=needsMoreThoughts  # Preserve flag
        )

        # --- Logging and History Update ---
        log_prefix = "--- Received Thought ---"
        if current_input_thought.isRevision:
            log_prefix = f"--- Received REVISION Thought (revising #{current_input_thought.revisesThought}) ---"
        elif current_input_thought.branchFromThought is not None:
            log_prefix = f"--- Received BRANCH Thought (from #{current_input_thought.branchFromThought}, ID: {current_input_thought.branchId}) ---"

        formatted_log_thought = format_thought_for_log(current_input_thought)
        logger.info(f"\n{log_prefix}\n{formatted_log_thought}\n")

        # Add the thought to history
        app_context.add_thought(current_input_thought)

        # --- Process Thought with Team (Coordinate Mode) ---
        logger.info(f"Passing thought #{thoughtNumber} to the Coordinator...")

        # Prepare input for the team coordinator. Pass the core thought content.
        # Include context about revision/branching directly in the input string for the coordinator.
        input_prompt = f"Process Thought #{current_input_thought.thoughtNumber}:\n"
        if current_input_thought.isRevision and current_input_thought.revisesThought is not None:
            original_thought_text = _find_historical_thought(
                current_input_thought.revisesThought, "Unknown Original Thought")
            input_prompt += f"**This is a REVISION of Thought #{current_input_thought.revisesThought}** (Original: \"{original_thought_text}\").\n"
        elif current_input_thought.branchFromThought is not None and current_input_thought.branchId is not None:
            branch_point_text = _find_historical_thought(
                current_input_thought.branchFromThought, "Unknown Branch Point")
            input_prompt += f"**This is a BRANCH (ID: {current_input_thought.branchId}) from Thought #{current_input_thought.branchFromThought}** (Origin: \"{branch_point_text}\").\n"

        input_prompt += f"\nThought Content: \"{current_input_thought.thought}\""

        # Call the team's arun method. The coordinator agent will handle it.
        team_response = await app_context.team.arun(input_prompt)

        coordinator_response = team_response.content if hasattr(team_response, 'content') else str(team_response)
        logger.info(f"Coordinator finished processing thought #{thoughtNumber}.")
        logger.debug(f"Coordinator Raw Response:\n{coordinator_response}")

        # --- Guidance for Next Step (Coordinate Mode) ---
        additional_guidance = "\n\nGuidance for next step:"

        if not current_input_thought.nextThoughtNeeded:
            additional_guidance = "\n\nThis is the final thought based on current estimates or your signal. Review the Coordinator's final synthesis."
        else:
            additional_guidance += " Review the Coordinator's synthesized response above."
            additional_guidance += "\n**Revision/Branching:** Did the Coordinator recommend revising a previous thought ('RECOMMENDATION: Revise thought #X...')? If so, use `isRevision=True` and `revisesThought=X` in your next call."
            additional_guidance += " Did the Coordinator suggest exploring alternatives ('SUGGESTION: Consider branching...')? If so, consider using `branchFromThought=Y` and `branchId='new-branch-Z'`."
            additional_guidance += "\n**Next Thought:** Based on the Coordinator's output and the overall goal, formulate the next logical thought. Address any specific points raised by the Coordinator."
            additional_guidance += "\n**ToT Principle:** If the Coordinator highlighted multiple viable paths or unresolved alternatives, consider initiating parallel branches (using distinct `branchId`s originating from the same `branchFromThought`) in subsequent steps to explore them, aiming for later evaluation/synthesis."

        # --- Build Result ---
        result_data = {
            "processedThoughtNumber": current_input_thought.thoughtNumber,
            "estimatedTotalThoughts": current_input_thought.totalThoughts,
            "nextThoughtNeeded": current_input_thought.nextThoughtNeeded,
            "coordinatorResponse": coordinator_response + additional_guidance,  # Coordinator's synthesized response + guidance
            "branches": list(app_context.branches.keys()),
            "thoughtHistoryLength": len(app_context.thought_history),
            "branchDetails": {
                "currentBranchId": current_input_thought.branchId if current_input_thought.branchFromThought is not None else "main",
                "branchOriginThought": current_input_thought.branchFromThought,
                "allBranches": app_context.get_all_branches()  # Include counts
            },
            "isRevision": current_input_thought.isRevision,
            "revisesThought": current_input_thought.revisesThought if current_input_thought.isRevision else None,
            "isBranch": current_input_thought.branchFromThought is not None,
            "status": "success"
        }

        return json.dumps(result_data, indent=2)

    except ValidationError as e:
        logger.error(f"Validation Error processing tool call: {e}")
        # Provide detailed validation error back to the caller
        return json.dumps({
            "error": f"Input validation failed: {e}",
            "status": "validation_error"
        }, indent=2)
    except Exception as e:
        logger.exception("Error processing tool call")  # Log full traceback
        return json.dumps({
            "error": f"An unexpected error occurred: {str(e)}",
            "status": "failed"
        }, indent=2)
|
729
|
+
|
730
|
+
# --- Main Execution ---
|
731
|
+
|
732
|
+
def run():
    """Initializes and runs the MCP server in coordinate mode."""
    global app_context

    logger.info("Initializing Sequential Thinking Server (Coordinate Mode)...")

    # When a framework drives the server, the lifespan manager supplies the
    # context; for direct script execution we create it here. The guard keeps
    # this safe if a context was already set up elsewhere.
    if not app_context:
        logger.info("Initializing application resources directly (Coordinate Mode)...")
        try:
            app_context = AppContext(team=create_sequential_thinking_team())
            logger.info("Agno team initialized directly in coordinate mode.")
        except Exception as e:
            logger.critical(f"Failed to initialize Agno team: {e}", exc_info=True)
            sys.exit(1)

    try:
        logger.info("Sequential Thinking MCP Server running on stdio (Coordinate Mode)")
        if not app_context:
            logger.critical("FATAL: Application context not initialized before run.")
            sys.exit(1)
        mcp.run(transport="stdio")
    finally:
        logger.info("Shutting down application resources...")
        # Drop the directly-created context so a subsequent run starts clean.
        app_context = None
|
761
|
+
|
762
|
+
if __name__ == "__main__":
|
763
|
+
# Check necessary environment variables
|
764
|
+
if "DEEPSEEK_API_KEY" not in os.environ:
|
765
|
+
logger.warning("DEEPSEEK_API_KEY environment variable not found. Model initialization might fail.")
|
766
|
+
|
767
|
+
# Check EXA_API_KEY only if ExaTools are actually used by any member agent
|
768
|
+
try:
|
769
|
+
team_for_check = create_sequential_thinking_team() # Create temporarily to check tools
|
770
|
+
uses_exa = False
|
771
|
+
# Iterate through members to check for ExaTools
|
772
|
+
for member in team_for_check.members:
|
773
|
+
if hasattr(member, 'tools') and member.tools:
|
774
|
+
if any(isinstance(t, ExaTools) for t in member.tools):
|
775
|
+
uses_exa = True
|
776
|
+
break # Found it, no need to check further
|
777
|
+
|
778
|
+
if uses_exa and "EXA_API_KEY" not in os.environ:
|
779
|
+
logger.warning("EXA_API_KEY environment variable not found, but ExaTools are configured in a team member. Researcher agent might fail.")
|
780
|
+
|
781
|
+
# Run the application
|
782
|
+
run()
|
783
|
+
except Exception as e:
|
784
|
+
logger.critical(f"Failed during initial setup or checks: {e}", exc_info=True)
|
785
|
+
sys.exit(1)
|
@@ -0,0 +1,243 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: mcp-server-mas-sequential-thinking
|
3
|
+
Version: 0.1.0
|
4
|
+
Summary: MCP Agent Implementation for Sequential Thinking
|
5
|
+
Author-email: Frad LEE <fradser@gmail.com>
|
6
|
+
Requires-Python: >=3.10
|
7
|
+
Requires-Dist: agno
|
8
|
+
Requires-Dist: asyncio
|
9
|
+
Requires-Dist: exa-py
|
10
|
+
Requires-Dist: mcp
|
11
|
+
Requires-Dist: python-dotenv
|
12
|
+
Provides-Extra: dev
|
13
|
+
Requires-Dist: black; extra == 'dev'
|
14
|
+
Requires-Dist: isort; extra == 'dev'
|
15
|
+
Requires-Dist: mypy; extra == 'dev'
|
16
|
+
Requires-Dist: pytest; extra == 'dev'
|
17
|
+
Description-Content-Type: text/markdown
|
18
|
+
|
19
|
+
# Sequential Thinking Multi-Agent System (MAS) 
|
20
|
+
|
21
|
+
[](https://twitter.com/FradSer) [](https://www.python.org/downloads/) [](https://github.com/cognitivecomputations/agno)
|
22
|
+
|
23
|
+
English | [简体中文](README.zh-CN.md)
|
24
|
+
|
25
|
+
This project implements an advanced sequential thinking process using a **Multi-Agent System (MAS)** built with the **Agno** framework and served via **MCP**. It represents a significant evolution from simpler state-tracking approaches, leveraging coordinated specialized agents for deeper analysis and problem decomposition.
|
26
|
+
|
27
|
+
## Overview
|
28
|
+
|
29
|
+
This server provides a sophisticated `sequentialthinking` tool designed for complex problem-solving. Unlike [its predecessor](https://github.com/modelcontextprotocol/servers/tree/main/src/sequentialthinking), this version utilizes a true Multi-Agent System (MAS) architecture where:
|
30
|
+
|
31
|
+
* **A Coordinating Agent** (the `Team` object in `coordinate` mode) manages the workflow.
|
32
|
+
* **Specialized Agents** (Planner, Researcher, Analyzer, Critic, Synthesizer) handle specific sub-tasks based on their defined roles and expertise.
|
33
|
+
* Incoming thoughts are actively **processed, analyzed, and synthesized** by the agent team, not just logged.
|
34
|
+
* The system supports complex thought patterns including **revisions** of previous steps and **branching** to explore alternative paths.
|
35
|
+
* Integration with external tools like **Exa** (via the Researcher agent) allows for dynamic information gathering.
|
36
|
+
* Robust **Pydantic** validation ensures data integrity for thought steps.
|
37
|
+
* Detailed **logging** tracks the process, including agent interactions (handled by the coordinator).
|
38
|
+
|
39
|
+
The goal is to achieve a higher quality of analysis and a more nuanced thinking process than possible with a single agent or simple state tracking, by harnessing the power of specialized roles working collaboratively.
|
40
|
+
|
41
|
+
## Key Differences from Original Version (TypeScript)
|
42
|
+
|
43
|
+
This Python/Agno implementation marks a fundamental shift from the original TypeScript version:
|
44
|
+
|
45
|
+
| Feature/Aspect | Python/Agno Version (Current) | TypeScript Version (Original) |
|
46
|
+
| :------------------ | :------------------------------------------------------------------- | :--------------------------------------------------- |
|
47
|
+
| **Architecture** | **Multi-Agent System (MAS)**; Active processing by a team of agents. | **Single Class State Tracker**; Simple logging/storing. |
|
48
|
+
| **Intelligence** | **Distributed Agent Logic**; Embedded in specialized agents & Coordinator. | **External LLM Only**; No internal intelligence. |
|
49
|
+
| **Processing** | **Active Analysis & Synthesis**; Agents *act* on the thought. | **Passive Logging**; Merely recorded the thought. |
|
50
|
+
| **Frameworks** | **Agno (MAS) + FastMCP (Server)**; Uses dedicated MAS library. | **MCP SDK only**. |
|
51
|
+
| **Coordination** | **Explicit Team Coordination Logic** (`Team` in `coordinate` mode). | **None**; No coordination concept. |
|
52
|
+
| **Validation** | **Pydantic Schema Validation**; Robust data validation. | **Basic Type Checks**; Less reliable. |
|
53
|
+
| **External Tools** | **Integrated (Exa via Researcher)**; Can perform research tasks. | **None**. |
|
54
|
+
| **Logging** | **Structured Python Logging (File + Console)**; Configurable. | **Console Logging with Chalk**; Basic. |
|
55
|
+
| **Language & Ecosystem** | **Python**; Leverages Python AI/ML ecosystem. | **TypeScript/Node.js**. |
|
56
|
+
|
57
|
+
In essence, the system evolved from a passive thought *recorder* to an active thought *processor* powered by a collaborative team of AI agents.
|
58
|
+
|
59
|
+
## How it Works (Coordinate Mode)
|
60
|
+
|
61
|
+
1. **Initiation:** An external LLM uses the `sequential-thinking-starter` prompt to define the problem and initiate the process.
|
62
|
+
2. **Tool Call:** The LLM calls the `sequentialthinking` tool with the first (or subsequent) thought, structured according to the `ThoughtData` model.
|
63
|
+
3. **Validation & Logging:** The tool receives the call, validates the input using Pydantic, logs the incoming thought, and updates the history/branch state via `AppContext`.
|
64
|
+
4. **Coordinator Invocation:** The core thought content (with context about revisions/branches) is passed to the `SequentialThinkingTeam`'s `arun` method.
|
65
|
+
5. **Coordinator Analysis & Delegation:** The `Team` (acting as Coordinator) analyzes the input thought, breaks it into sub-tasks, and delegates these sub-tasks to the *most relevant* specialist agents (e.g., Analyzer for analysis tasks, Researcher for information needs).
|
66
|
+
6. **Specialist Execution:** Delegated agents execute their specific sub-tasks using their instructions, models, and tools (like `ThinkingTools` or `ExaTools`).
|
67
|
+
7. **Response Collection:** Specialists return their results to the Coordinator.
|
68
|
+
8. **Synthesis & Guidance:** The Coordinator synthesizes the specialists' responses into a single, cohesive output. It may include recommendations for revision or branching based on the specialists' findings (especially the Critic and Analyzer). It also adds guidance for the LLM on formulating the next thought.
|
69
|
+
9. **Return Value:** The tool returns a JSON string containing the Coordinator's synthesized response, status, and updated context (branches, history length).
|
70
|
+
10. **Iteration:** The calling LLM uses the Coordinator's response and guidance to formulate the next `sequentialthinking` tool call, potentially triggering revisions or branches as suggested.
|
71
|
+
|
72
|
+
## Token Consumption Warning
|
73
|
+
|
74
|
+
⚠️ **High Token Usage:** Due to the Multi-Agent System architecture, this tool consumes significantly **more tokens** than single-agent alternatives or the previous TypeScript version. Each `sequentialthinking` call invokes:
|
75
|
+
* The Coordinator agent (the `Team` itself).
|
76
|
+
* Multiple specialist agents (potentially Planner, Researcher, Analyzer, Critic, Synthesizer, depending on the Coordinator's delegation).
|
77
|
+
|
78
|
+
This parallel processing leads to substantially higher token usage (potentially 3-6x or more per thought step) compared to single-agent or state-tracking approaches. Budget and plan accordingly. This tool prioritizes **analysis depth and quality** over token efficiency.
|
79
|
+
|
80
|
+
## Prerequisites
|
81
|
+
|
82
|
+
* Python 3.10+
|
83
|
+
* Access to a compatible LLM API (configured for `agno`, e.g., DeepSeek)
|
84
|
+
* `DEEPSEEK_API_KEY` environment variable.
|
85
|
+
* Exa API Key (if using the Researcher agent's capabilities)
|
86
|
+
* `EXA_API_KEY` environment variable.
|
87
|
+
* `uv` package manager (recommended) or `pip`.
|
88
|
+
|
89
|
+
## MCP Server Configuration (Client-Side)
|
90
|
+
|
91
|
+
This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
|
92
|
+
|
93
|
+
```
|
94
|
+
{
|
95
|
+
"mcpServers": {
|
96
|
+
"mas-sequential-thinking": {
|
97
|
+
"command": "uvx",
|
98
|
+
"args": [
|
99
|
+
"mcp-server-mas-sequential-thinking"
|
100
|
+
],
|
101
|
+
env": {
|
102
|
+
"DEEPSEEK_API_KEY": "your_deepseek_api_key",
|
103
|
+
"DEEPSEEK_BASE_URL": "your_base_url_if_needed", # Optional: If using a custom endpoint
|
104
|
+
"EXA_API_KEY": "your_exa_api_key"
|
105
|
+
}
|
106
|
+
}
|
107
|
+
}
|
108
|
+
}
|
109
|
+
```
|
110
|
+
|
111
|
+
## Installation & Setup
|
112
|
+
|
113
|
+
1. **Clone the repository:**
|
114
|
+
```bash
|
115
|
+
git clone git@github.com:FradSer/mcp-server-mas-sequential-thinking.git
|
116
|
+
cd mcp-server-mas-sequential-thinking
|
117
|
+
```
|
118
|
+
|
119
|
+
2. **Set Environment Variables:**
|
120
|
+
Create a `.env` file in the root directory or export the variables:
|
121
|
+
```dotenv
|
122
|
+
# Required for the LLM used by Agno agents/team
|
123
|
+
DEEPSEEK_API_KEY="your_deepseek_api_key"
|
124
|
+
# DEEPSEEK_BASE_URL="your_base_url_if_needed" # Optional: If using a custom endpoint
|
125
|
+
|
126
|
+
# Required ONLY if the Researcher agent is used and needs Exa
|
127
|
+
EXA_API_KEY="your_exa_api_key"
|
128
|
+
```
|
129
|
+
|
130
|
+
3. **Install Dependencies:**
|
131
|
+
|
132
|
+
* **Using `uv` (Recommended):**
|
133
|
+
```bash
|
134
|
+
# Install uv if you don't have it:
|
135
|
+
# curl -LsSf https://astral.sh/uv/install.sh | sh
|
136
|
+
# source $HOME/.cargo/env # Or restart your shell
|
137
|
+
|
138
|
+
uv pip install -r requirements.txt
|
139
|
+
# Or if a pyproject.toml exists with dependencies:
|
140
|
+
# uv pip install .
|
141
|
+
```
|
142
|
+
* **Using `pip`:**
|
143
|
+
```bash
|
144
|
+
pip install -r requirements.txt
|
145
|
+
# Or if a pyproject.toml exists with dependencies:
|
146
|
+
# pip install .
|
147
|
+
```
|
148
|
+
|
149
|
+
## Usage
|
150
|
+
|
151
|
+
Run the server script (assuming the main script is named `main.py` or similar based on your file structure):
|
152
|
+
|
153
|
+
```bash
|
154
|
+
python main.py
|
155
|
+
```
|
156
|
+
|
157
|
+
The server will start and listen for requests via stdio, making the `sequentialthinking` tool available to compatible MCP clients (like certain LLMs or testing frameworks).
|
158
|
+
|
159
|
+
### `sequentialthinking` Tool Parameters
|
160
|
+
|
161
|
+
The tool expects arguments matching the `ThoughtData` Pydantic model:
|
162
|
+
|
163
|
+
```python
|
164
|
+
# Simplified representation
|
165
|
+
{
|
166
|
+
"thought": str, # Content of the current thought/step
|
167
|
+
"thoughtNumber": int, # Sequence number (>=1)
|
168
|
+
"totalThoughts": int, # Estimated total steps (>=1, suggest >=5)
|
169
|
+
"nextThoughtNeeded": bool, # Is another step required after this?
|
170
|
+
"isRevision": bool = False, # Is this revising a previous thought?
|
171
|
+
"revisesThought": Optional[int] = None, # If isRevision, which thought number?
|
172
|
+
"branchFromThought": Optional[int] = None, # If branching, from which thought?
|
173
|
+
"branchId": Optional[str] = None, # Unique ID for the branch
|
174
|
+
"needsMoreThoughts": bool = False # Signal if estimate is too low before last step
|
175
|
+
}
|
176
|
+
```
|
177
|
+
|
178
|
+
### Interacting with the Tool (Conceptual Example)
|
179
|
+
|
180
|
+
An LLM would interact with this tool iteratively:
|
181
|
+
|
182
|
+
1. **LLM:** Uses `sequential-thinking-starter` prompt with the problem.
|
183
|
+
2. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 1`, initial `thought` (e.g., "Plan the analysis..."), `totalThoughts` estimate, `nextThoughtNeeded: True`.
|
184
|
+
3. **Server:** MAS processes the thought -> Coordinator synthesizes response & provides guidance (e.g., "Analysis plan complete. Suggest researching X next. No revisions recommended yet.").
|
185
|
+
4. **LLM:** Receives JSON response containing `coordinatorResponse`.
|
186
|
+
5. **LLM:** Formulates the next thought (e.g., "Research X using Exa...") based on the `coordinatorResponse`.
|
187
|
+
6. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 2`, the new `thought`, updated `totalThoughts` (if needed), `nextThoughtNeeded: True`.
|
188
|
+
7. **Server:** MAS processes -> Coordinator synthesizes (e.g., "Research complete. Findings suggest a flaw in thought #1's assumption. RECOMMENDATION: Revise thought #1...").
|
189
|
+
8. **LLM:** Receives response, sees the recommendation.
|
190
|
+
9. **LLM:** Formulates a revision thought.
|
191
|
+
10. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 3`, the revision `thought`, `isRevision: True`, `revisesThought: 1`, `nextThoughtNeeded: True`.
|
192
|
+
11. **... and so on, potentially branching or extending as needed.**
|
193
|
+
|
194
|
+
### Tool Response Format
|
195
|
+
|
196
|
+
The tool returns a JSON string containing:
|
197
|
+
|
198
|
+
```json
|
199
|
+
{
|
200
|
+
"processedThoughtNumber": int,
|
201
|
+
"estimatedTotalThoughts": int,
|
202
|
+
"nextThoughtNeeded": bool,
|
203
|
+
"coordinatorResponse": "Synthesized output from the agent team, including analysis, findings, and guidance for the next step...",
|
204
|
+
"branches": ["list", "of", "branch", "ids"],
|
205
|
+
"thoughtHistoryLength": int,
|
206
|
+
"branchDetails": {
|
207
|
+
"currentBranchId": "main | branchId",
|
208
|
+
"branchOriginThought": null | int,
|
209
|
+
"allBranches": {"main": count, "branchId": count, ...}
|
210
|
+
},
|
211
|
+
"isRevision": bool,
|
212
|
+
"revisesThought": null | int,
|
213
|
+
"isBranch": bool,
|
214
|
+
"status": "success | validation_error | failed",
|
215
|
+
"error": "Error message if status is not success" // Optional
|
216
|
+
}
|
217
|
+
```
|
218
|
+
|
219
|
+
## Logging
|
220
|
+
|
221
|
+
* Logs are written to `~/.sequential_thinking/logs/sequential_thinking.log`.
|
222
|
+
* Uses Python's standard `logging` module.
|
223
|
+
* Includes rotating file handler (10MB limit, 5 backups) and console handler (INFO level).
|
224
|
+
* Logs include timestamps, levels, logger names, and messages, including formatted thought representations.
|
225
|
+
|
226
|
+
## Development
|
227
|
+
|
228
|
+
(Add development guidelines here if applicable, e.g., setting up dev environments, running tests, linting.)
|
229
|
+
|
230
|
+
1. Clone the repository.
|
231
|
+
2. Set up a virtual environment.
|
232
|
+
3. Install dependencies, potentially including development extras:
|
233
|
+
```bash
|
234
|
+
# Using uv
|
235
|
+
uv pip install -e ".[dev]"
|
236
|
+
# Using pip
|
237
|
+
pip install -e ".[dev]"
|
238
|
+
```
|
239
|
+
4. Run linters/formatters/tests.
|
240
|
+
|
241
|
+
## License
|
242
|
+
|
243
|
+
MIT
|
@@ -0,0 +1,5 @@
|
|
1
|
+
main.py,sha256=mDC2ayNFA5ON0v4PMhpWijIHxlk_5h7ak8x6shcqORQ,40049
|
2
|
+
mcp_server_mas_sequential_thinking-0.1.0.dist-info/METADATA,sha256=M-Q_H5hbU0tU1x5v0_dSO_cJFEy30x_nTQB7PIs2f6k,13349
|
3
|
+
mcp_server_mas_sequential_thinking-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
4
|
+
mcp_server_mas_sequential_thinking-0.1.0.dist-info/entry_points.txt,sha256=JKyUvlVU-LK9-6EXglvJfPYNYGDJQSmIms7wlEy_p0g,65
|
5
|
+
mcp_server_mas_sequential_thinking-0.1.0.dist-info/RECORD,,
|