mcp-server-mas-sequential-thinking 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
main.py CHANGED
@@ -308,7 +308,7 @@ def get_model_config() -> tuple[Type[Model], str, str]:
308
308
  ModelClass = DeepSeek
309
309
  # Use environment variables for DeepSeek model IDs if set, otherwise use defaults
310
310
  team_model_id = os.environ.get("DEEPSEEK_TEAM_MODEL_ID", "deepseek-chat")
311
- agent_model_id = os.environ.get("DEEPSEEK_AGENT_MODEL_ID", "deepseek-reasoner")
311
+ agent_model_id = os.environ.get("DEEPSEEK_AGENT_MODEL_ID", "deepseek-chat")
312
312
  logger.info(f"Using DeepSeek: Team Model='{team_model_id}', Agent Model='{agent_model_id}'")
313
313
  elif provider == "groq":
314
314
  ModelClass = Groq
@@ -465,6 +465,7 @@ def create_sequential_thinking_team() -> Team:
465
465
  " 5. Formulate a response presenting the synthesized information or conclusions.",
466
466
  " 6. Return your response to the Team Coordinator.",
467
467
  "Focus on creating clarity and coherence for the delegated synthesis task.",
468
+ "**For the final synthesis task provided by the Coordinator:** Aim for a concise and high-level integration. Focus on the core synthesized understanding and key takeaways, rather than detailing the step-by-step process or extensive analysis of each component.",
468
469
  ],
469
470
  model=agent_model_instance, # Use the designated agent model
470
471
  add_datetime_to_instructions=True,
@@ -573,18 +574,20 @@ mcp = FastMCP()
573
574
 
574
575
  # --- MCP Handlers ---
575
576
 
576
- @mcp.prompt("sequential-thinking-starter")
577
- def sequential_thinking_starter(problem: str, context: str = ""):
577
+ @mcp.prompt("sequential-thinking")
578
+ def sequential_thinking_prompt(problem: str, context: str = ""):
578
579
  """
579
580
  Starter prompt for sequential thinking that ENCOURAGES non-linear exploration
580
- using coordinate mode.
581
+ using coordinate mode. Returns separate user and assistant messages.
581
582
  """
582
583
  min_thoughts = 5 # Set a reasonable minimum number of initial thoughts
583
584
 
584
- prompt_text = f"""Initiate a comprehensive sequential thinking process for the following problem:
585
+ user_prompt_text = f"""Initiate a comprehensive sequential thinking process for the following problem:
585
586
 
586
587
  Problem: {problem}
587
- {f'Context: {context}' if context else ''}
588
+ {f'Context: {context}' if context else ''}"""
589
+
590
+ assistant_guidelines = f"""Okay, let's start the sequential thinking process. Here are the guidelines and the process we'll follow using the 'coordinate' mode team:
588
591
 
589
592
  **Sequential Thinking Goals & Guidelines (Coordinate Mode):**
590
593
 
@@ -600,17 +603,22 @@ Problem: {problem}
600
603
 
601
604
  **Process:**
602
605
 
603
- * The `sequentialthinking` tool will track your progress. The Agno team operates in 'coordinate' mode. The Coordinator agent receives your thought, delegates sub-tasks to specialists (like Analyzer, Critic), and synthesizes their results, potentially including recommendations for revision or branching.
606
+ * The `sequentialthinking` tool will track your progress. The Agno team operates in 'coordinate' mode. The Coordinator agent receives your thought, delegates sub-tasks to specialists (like Analyzer, Critic), and synthesizes their outputs, potentially including recommendations for revision or branching.
604
607
  * Focus on insightful analysis, constructive critique (leading to potential revisions), and creative exploration (leading to potential branching).
605
- * Actively reflect on the process. Linear thinking might be insufficient for complex problems. Proceed with the first thought."""
608
+ * Actively reflect on the process. Linear thinking might be insufficient for complex problems.
606
609
 
607
- return {
608
- "description": "Mandatory non-linear sequential thinking starter prompt (coordinate mode)",
609
- "messages": [{"role": "user", "content": {"type": "text", "text": prompt_text}}]
610
- }
610
+ Proceed with the first thought based on these guidelines."""
611
+
612
+ return [
613
+ {
614
+ "description": "Starter prompt for non-linear sequential thinking (coordinate mode), providing problem and guidelines separately.",
615
+ "messages": [
616
+ {"role": "user", "content": {"type": "text", "text": user_prompt_text}},
617
+ {"role": "assistant", "content": {"type": "text", "text": assistant_guidelines}}
618
+ ]
619
+ }
620
+ ]
611
621
 
612
- # Removed process_agent_tasks function as it's not needed for coordinate mode.
613
- # The Team's coordinator handles delegation internally.
614
622
 
615
623
  @mcp.tool()
616
624
  async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: int, nextThoughtNeeded: bool,
@@ -618,26 +626,60 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
618
626
  branchFromThought: Optional[int] = None, branchId: Optional[str] = None,
619
627
  needsMoreThoughts: bool = False) -> str:
620
628
  """
621
- Processes one step in a sequential thinking chain using the Agno team in coordinate mode.
622
-
623
- The Coordinator agent within the team receives the thought, breaks it down,
624
- delegates to specialists (Planner, Researcher, Analyzer, Critic, Synthesizer),
625
- and synthesizes their outputs into a final response. The Coordinator's response
626
- may include suggestions for revision or branching.
629
+ A detailed tool for dynamic and reflective problem-solving through thoughts.
630
+
631
+ This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
632
+ Each thought can build on, question, or revise previous insights as understanding deepens.
633
+ It uses an Agno multi-agent team (in coordinate mode) to process each thought, where a
634
+ Coordinator delegates sub-tasks to specialists (Planner, Researcher, Analyzer, Critic, Synthesizer)
635
+ and synthesizes their outputs.
636
+
637
+ When to use this tool:
638
+ - Breaking down complex problems into manageable steps.
639
+ - Planning and design processes requiring iterative refinement and revision.
640
+ - Complex analysis where the approach might need course correction based on findings.
641
+ - Problems where the full scope or optimal path is not clear initially.
642
+ - Situations requiring a multi-step solution with context maintained across steps.
643
+ - Tasks where focusing on relevant information and filtering out noise is crucial.
644
+ - Developing and verifying solution hypotheses through a chain of reasoning.
645
+
646
+ Key features & usage guidelines:
647
+ - The process is driven by the caller (e.g., an LLM) making sequential calls to this tool.
648
+ - Start with an initial estimate for `totalThoughts`, but adjust it dynamically via subsequent calls if needed.
649
+ - Use `isRevision=True` and `revisesThought` to explicitly revisit and correct previous steps.
650
+ - Use `branchFromThought` and `branchId` to explore alternative paths or perspectives.
651
+ - If the estimated `totalThoughts` is reached but more steps are needed, set `needsMoreThoughts=True` on the *last* thought within the current estimate to signal the need for extension.
652
+ - Express uncertainty and explore alternatives within the `thought` content.
653
+ - Generate solution hypotheses within the `thought` content when appropriate.
654
+ - Verify hypotheses in subsequent `thought` steps based on the reasoning chain.
655
+ - The caller should repeat the process, calling this tool for each step, until a satisfactory solution is reached.
656
+ - Set `nextThoughtNeeded=False` only when the caller determines the process is complete and a final answer is ready.
627
657
 
628
658
  Parameters:
629
- thought (str): The current thinking step.
630
- thoughtNumber (int): Current sequence number (≥1)
631
- totalThoughts (int): Estimated total thoughts needed (≥5 suggested)
632
- nextThoughtNeeded (bool): Whether another thought step is needed
633
- isRevision (bool, optional): Whether this revises previous thinking
634
- revisesThought (int, optional): Which thought is being reconsidered
635
- branchFromThought (int, optional): If branching, which thought number is the branch point
636
- branchId (str, optional): Branch identifier
637
- needsMoreThoughts (bool, optional): If more thoughts are needed beyond current estimate
659
+ thought (str): The content of the current thinking step. This can be an analytical step,
660
+ a plan, a question, a critique, a revision, a hypothesis, or verification.
661
+ Make it specific enough to imply the desired action.
662
+ thoughtNumber (int): The sequence number of this thought (>=1). Can exceed initial `totalThoughts`
663
+ if the process is extended.
664
+ totalThoughts (int): The current *estimate* of the total thoughts required for the process.
665
+ This can be adjusted by the caller in subsequent calls. Minimum 5 suggested.
666
+ nextThoughtNeeded (bool): Indicates if the caller intends to make another call to this tool
667
+ after the current one. Set to False only when the entire process is deemed complete.
668
+ isRevision (bool, optional): True if this thought revises or corrects a previous thought. Defaults to False.
669
+ revisesThought (int, optional): The `thoughtNumber` of the thought being revised, required if `isRevision` is True.
670
+ Must be less than the current `thoughtNumber`.
671
+ branchFromThought (int, optional): The `thoughtNumber` from which this thought branches to explore an alternative path.
672
+ Defaults to None.
673
+ branchId (str, optional): A unique identifier for the branch being explored, required if `branchFromThought` is set.
674
+ Defaults to None.
675
+ needsMoreThoughts (bool, optional): Set to True on a thought if the caller anticipates needing more
676
+ steps beyond the current `totalThoughts` estimate *after* this thought.
677
+ Defaults to False.
638
678
 
639
679
  Returns:
640
- str: JSON string containing the Coordinator's synthesized response and status.
680
+ str: The Coordinator agent's synthesized response based on specialist contributions for the current `thought`.
681
+ Includes guidance for the caller on potential next steps (e.g., suggestions for revision or branching
682
+ based on the specialists' analysis). The caller uses this response to formulate the *next* thought.
641
683
  """
642
684
  global app_context
643
685
  if not app_context or not app_context.team:
@@ -651,10 +693,8 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
651
693
  logger.info("Successfully re-initialized team and context.")
652
694
  except Exception as init_err:
653
695
  logger.critical(f"Failed to re-initialize Agno team during tool call: {init_err}", exc_info=True)
654
- return json.dumps({
655
- "error": "Critical Error: Application context not available and re-initialization failed.",
656
- "status": "critical_failure"
657
- }, indent=2, ensure_ascii=False)
696
+ # Return only the error message string
697
+ return f"Critical Error: Application context not available and re-initialization failed: {init_err}"
658
698
  # Or raise Exception("Critical Error: Application context not available.")
659
699
 
660
700
  MIN_TOTAL_THOUGHTS = 5 # Keep a minimum suggestion
@@ -730,23 +770,25 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
730
770
  # Call the team's arun method. The coordinator agent will handle it.
731
771
  team_response = await app_context.team.arun(input_prompt)
732
772
 
733
- coordinator_response = team_response.content if hasattr(team_response, 'content') else str(team_response)
773
+ # Ensure coordinator_response is a string, default to empty string if None
774
+ coordinator_response_content = team_response.content if hasattr(team_response, 'content') else None
775
+ coordinator_response = str(coordinator_response_content) if coordinator_response_content is not None else ""
776
+
734
777
  logger.info(f"Coordinator finished processing thought #{thoughtNumber}.")
735
778
  logger.debug(f"Coordinator Raw Response:\n{coordinator_response}")
736
779
 
737
780
 
738
781
  # --- Guidance for Next Step (Coordinate Mode) ---
739
- additional_guidance = "\n\nGuidance for next step:"
740
- next_thought_num = current_input_thought.thoughtNumber + 1
782
+ additional_guidance = "\n\nGuidance for next step:" # Initialize
741
783
 
742
784
  if not current_input_thought.nextThoughtNeeded:
743
- additional_guidance = "\n\nThis is the final thought based on current estimates or your signal. Review the Coordinator's final synthesis."
785
+ # Keep the message for the final thought concise
786
+ additional_guidance = "\n\nThis is the final thought. Review the Coordinator's final synthesis."
744
787
  else:
745
- additional_guidance += " Review the Coordinator's synthesized response above."
746
- additional_guidance += "\n**Revision/Branching:** Did the Coordinator recommend revising a previous thought ('RECOMMENDATION: Revise thought #X...')? If so, use `isRevision=True` and `revisesThought=X` in your next call."
747
- additional_guidance += " Did the Coordinator suggest exploring alternatives ('SUGGESTION: Consider branching...')? If so, consider using `branchFromThought=Y` and `branchId='new-branch-Z'`."
748
- additional_guidance += "\n**Next Thought:** Based on the Coordinator's output and the overall goal, formulate the next logical thought. Address any specific points raised by the Coordinator."
749
- additional_guidance += "\n**ToT Principle:** If the Coordinator highlighted multiple viable paths or unresolved alternatives, consider initiating parallel branches (using distinct `branchId`s originating from the same `branchFromThought`) in subsequent steps to explore them, aiming for later evaluation/synthesis."
788
+ # Start guidance text for non-final thoughts
789
+ additional_guidance += "\n- **Revision/Branching:** Look for 'RECOMMENDATION: Revise thought #X...' or 'SUGGESTION: Consider branching...' in the response."
790
+ additional_guidance += " Use `isRevision=True`/`revisesThought=X` for revisions or `branchFromThought=Y`/`branchId='...'` for branching accordingly."
791
+ additional_guidance += "\n- **Next Thought:** Based on the Coordinator's response, formulate the next logical thought, addressing any points raised."
750
792
 
751
793
 
752
794
  # --- Build Result ---
@@ -754,7 +796,8 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
754
796
  "processedThoughtNumber": current_input_thought.thoughtNumber,
755
797
  "estimatedTotalThoughts": current_input_thought.totalThoughts,
756
798
  "nextThoughtNeeded": current_input_thought.nextThoughtNeeded,
757
- "coordinatorResponse": coordinator_response + additional_guidance, # Coordinator's synthesized response + guidance
799
+ # Ensure both parts are strings before concatenating
800
+ "coordinatorResponse": coordinator_response + str(additional_guidance),
758
801
  "branches": list(app_context.branches.keys()),
759
802
  "thoughtHistoryLength": len(app_context.thought_history),
760
803
  "branchDetails": {
@@ -768,21 +811,17 @@ async def sequentialthinking(thought: str, thoughtNumber: int, totalThoughts: in
768
811
  "status": "success"
769
812
  }
770
813
 
771
- return json.dumps(result_data, indent=2, ensure_ascii=False)
814
+ # Return only the coordinatorResponse string
815
+ return result_data["coordinatorResponse"]
772
816
 
773
817
  except ValidationError as e:
774
818
  logger.error(f"Validation Error processing tool call: {e}")
775
- # Provide detailed validation error back to the caller
776
- return json.dumps({
777
- "error": f"Input validation failed: {e}",
778
- "status": "validation_error"
779
- }, indent=2, ensure_ascii=False)
819
+ # Return only the error message string
820
+ return f"Input validation failed: {e}"
780
821
  except Exception as e:
781
822
  logger.exception(f"Error processing tool call") # Log full traceback
782
- return json.dumps({
783
- "error": f"An unexpected error occurred: {str(e)}",
784
- "status": "failed"
785
- }, indent=2, ensure_ascii=False)
823
+ # Return only the error message string
824
+ return f"An unexpected error occurred: {str(e)}"
786
825
 
787
826
  # --- Main Execution ---
788
827
 
@@ -0,0 +1,329 @@
1
+ Metadata-Version: 2.4
2
+ Name: mcp-server-mas-sequential-thinking
3
+ Version: 0.2.3
4
+ Summary: MCP Agent Implementation for Sequential Thinking
5
+ Author-email: Frad LEE <fradser@gmail.com>
6
+ Requires-Python: >=3.10
7
+ Requires-Dist: agno
8
+ Requires-Dist: asyncio
9
+ Requires-Dist: exa-py
10
+ Requires-Dist: groq
11
+ Requires-Dist: mcp
12
+ Requires-Dist: python-dotenv
13
+ Provides-Extra: dev
14
+ Requires-Dist: black; extra == 'dev'
15
+ Requires-Dist: isort; extra == 'dev'
16
+ Requires-Dist: mypy; extra == 'dev'
17
+ Requires-Dist: pytest; extra == 'dev'
18
+ Description-Content-Type: text/markdown
19
+
20
+ # Sequential Thinking Multi-Agent System (MAS) ![](https://img.shields.io/badge/A%20FRAD%20PRODUCT-WIP-yellow)
21
+
22
+ [![smithery badge](https://smithery.ai/badge/@FradSer/mcp-server-mas-sequential-thinking)](https://smithery.ai/server/@FradSer/mcp-server-mas-sequential-thinking) [![Twitter Follow](https://img.shields.io/twitter/follow/FradSer?style=social)](https://twitter.com/FradSer) [![Python Version](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![Framework](https://img.shields.io/badge/Framework-Agno-orange.svg)](https://github.com/cognitivecomputations/agno)
23
+
24
+ English | [简体中文](README.zh-CN.md)
25
+
26
+ This project implements an advanced sequential thinking process using a **Multi-Agent System (MAS)** built with the **Agno** framework and served via **MCP**. It represents a significant evolution from simpler state-tracking approaches by leveraging coordinated, specialized agents for deeper analysis and problem decomposition.
27
+
28
+ ## Overview
29
+
30
+ This server provides a sophisticated `sequentialthinking` tool designed for complex problem-solving. Unlike [its predecessor](https://github.com/modelcontextprotocol/servers/tree/main/src/sequentialthinking), this version utilizes a true Multi-Agent System (MAS) architecture where:
31
+
32
+ - **A Coordinating Agent** (the `Team` object in `coordinate` mode) manages the workflow.
33
+ - **Specialized Agents** (Planner, Researcher, Analyzer, Critic, Synthesizer) handle specific sub-tasks based on their defined roles and expertise.
34
+ - Incoming thoughts are actively **processed, analyzed, and synthesized** by the agent team, not just logged.
35
+ - The system supports complex thought patterns, including **revisions** of previous steps and **branching** to explore alternative paths.
36
+ - Integration with external tools like **Exa** (via the Researcher agent) allows for dynamic information gathering.
37
+ - Robust **Pydantic** validation ensures data integrity for thought steps.
38
+ - Detailed **logging** tracks the process, including agent interactions (handled by the coordinator).
39
+
40
+ The goal is to achieve a higher quality of analysis and a more nuanced thinking process than possible with a single agent or simple state tracking by harnessing the power of specialized roles working collaboratively.
41
+
42
+ ## Key Differences from Original Version (TypeScript)
43
+
44
+ This Python/Agno implementation marks a fundamental shift from the original TypeScript version:
45
+
46
+ | Feature/Aspect | Python/Agno Version (Current) | TypeScript Version (Original) |
47
+ | :------------------ | :------------------------------------------------------------------- | :--------------------------------------------------- |
48
+ | **Architecture** | **Multi-Agent System (MAS)**; Active processing by a team of agents. | **Single Class State Tracker**; Simple logging/storing. |
49
+ | **Intelligence** | **Distributed Agent Logic**; Embedded in specialized agents & Coordinator. | **External LLM Only**; No internal intelligence. |
50
+ | **Processing** | **Active Analysis & Synthesis**; Agents *act* on the thought. | **Passive Logging**; Merely recorded the thought. |
51
+ | **Frameworks** | **Agno (MAS) + FastMCP (Server)**; Uses dedicated MAS library. | **MCP SDK only**. |
52
+ | **Coordination** | **Explicit Team Coordination Logic** (`Team` in `coordinate` mode). | **None**; No coordination concept. |
53
+ | **Validation** | **Pydantic Schema Validation**; Robust data validation. | **Basic Type Checks**; Less reliable. |
54
+ | **External Tools** | **Integrated (Exa via Researcher)**; Can perform research tasks. | **None**. |
55
+ | **Logging** | **Structured Python Logging (File + Console)**; Configurable. | **Console Logging with Chalk**; Basic. |
56
+ | **Language & Ecosystem** | **Python**; Leverages Python AI/ML ecosystem. | **TypeScript/Node.js**. |
57
+
58
+ In essence, the system evolved from a passive thought *recorder* to an active thought *processor* powered by a collaborative team of AI agents.
59
+
60
+ ## How it Works (Coordinate Mode)
61
+
62
+ 1. **Initiation:** An external LLM uses the `sequential-thinking` prompt to define the problem and initiate the process.
63
+ 2. **Tool Call:** The LLM calls the `sequentialthinking` tool with the first (or subsequent) thought, structured according to the `ThoughtData` Pydantic model.
64
+ 3. **Validation & Logging:** The tool receives the call, validates the input using Pydantic, logs the incoming thought, and updates the history/branch state via `AppContext`.
65
+ 4. **Coordinator Invocation:** The core thought content (along with context about revisions/branches) is passed to the `SequentialThinkingTeam`'s `arun` method.
66
+ 5. **Coordinator Analysis & Delegation:** The `Team` (acting as Coordinator) analyzes the input thought, breaks it down into sub-tasks, and delegates these sub-tasks to the *most relevant* specialist agents (e.g., Analyzer for analysis tasks, Researcher for information needs).
67
+ 6. **Specialist Execution:** Delegated agents execute their specific sub-tasks using their instructions, models, and tools (like `ThinkingTools` or `ExaTools`).
68
+ 7. **Response Collection:** Specialists return their results to the Coordinator.
69
+ 8. **Synthesis & Guidance:** The Coordinator synthesizes the specialists' responses into a single, cohesive output. This output may include recommendations for revision or branching based on the specialists' findings (especially from the Critic and Analyzer). It also provides guidance for the LLM on formulating the next thought.
70
+ 9. **Return Value:** The tool returns the Coordinator's synthesized response as a plain string, including guidance for the LLM on formulating the next thought (e.g., revision or branching suggestions).
71
+ 10. **Iteration:** The calling LLM uses the Coordinator's response and guidance to formulate the next `sequentialthinking` tool call, potentially triggering revisions or branches as suggested.
72
+
73
+ ## Token Consumption Warning
74
+
75
+ ⚠️ **High Token Usage:** Due to the Multi-Agent System architecture, this tool consumes significantly **more tokens** than single-agent alternatives or the previous TypeScript version. Each `sequentialthinking` call invokes:
76
+
77
+ - The Coordinator agent (the `Team` itself).
78
+ - Multiple specialist agents (potentially Planner, Researcher, Analyzer, Critic, Synthesizer, depending on the Coordinator's delegation).
79
+
80
+ This parallel processing leads to substantially higher token usage (potentially 3-6x or more per thought step) compared to single-agent or state-tracking approaches. Budget and plan accordingly. This tool prioritizes **analysis depth and quality** over token efficiency.
81
+
82
+ ## Prerequisites
83
+
84
+ - Python 3.10+
85
+ - Access to a compatible LLM API (configured for `agno`). The system currently supports:
86
+ - **Groq:** Requires `GROQ_API_KEY`.
87
+ - **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
88
+ - **OpenRouter:** Requires `OPENROUTER_API_KEY`.
89
+ - Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
90
+ - Exa API Key (required only if using the Researcher agent's capabilities)
91
+ - Set via the `EXA_API_KEY` environment variable.
92
+ - `uv` package manager (recommended) or `pip`.
93
+
94
+ ## MCP Server Configuration (Client-Side)
95
+
96
+ This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details on integrating external tool servers.
97
+
98
+ The `env` section within your MCP client configuration should include the API key for your chosen `LLM_PROVIDER`.
99
+
100
+ ```json
101
+ {
102
+ "mcpServers": {
103
+ "mas-sequential-thinking": {
104
+ "command": "uvx", // Or "python", "path/to/venv/bin/python" etc.
105
+ "args": [
106
+ "mcp-server-mas-sequential-thinking" // Or the path to your main script, e.g., "main.py"
107
+ ],
108
+ "env": {
109
+ "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
110
+ // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
111
+ "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
112
+ // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
113
+ "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
114
+ "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
115
+ }
116
+ }
117
+ }
118
+ }
119
+ ```
120
+
121
+ ## Installation & Setup
122
+
123
+ ### Installing via Smithery
124
+
125
+ To install Sequential Thinking Multi-Agent System for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@FradSer/mcp-server-mas-sequential-thinking):
126
+
127
+ ```bash
128
+ npx -y @smithery/cli install @FradSer/mcp-server-mas-sequential-thinking --client claude
129
+ ```
130
+
131
+ ### Manual Installation
132
+ 1. **Clone the repository:**
133
+ ```bash
134
+ git clone git@github.com:FradSer/mcp-server-mas-sequential-thinking.git
135
+ cd mcp-server-mas-sequential-thinking
136
+ ```
137
+
138
+ 2. **Set Environment Variables:**
139
+ Create a `.env` file in the project root directory or export the variables directly into your environment:
140
+ ```dotenv
141
+ # --- LLM Configuration ---
142
+ # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
143
+ LLM_PROVIDER="deepseek"
144
+
145
+ # Provide the API key for the chosen provider:
146
+ # GROQ_API_KEY="your_groq_api_key"
147
+ DEEPSEEK_API_KEY="your_deepseek_api_key"
148
+ # OPENROUTER_API_KEY="your_openrouter_api_key"
149
+
150
+ # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
151
+ # DEEPSEEK_BASE_URL="your_base_url_if_needed"
152
+
153
+ # Optional: Specify different models for Team Coordinator and Specialist Agents
154
+ # Defaults are set within the code based on the provider if these are not set.
155
+ # Example for Groq:
156
+ # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
157
+ # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
158
+ # Example for DeepSeek:
159
+ # DEEPSEEK_TEAM_MODEL_ID="deepseek-chat" # Note: `deepseek-reasoner` is not recommended as it doesn't support function calling
160
+ # DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
161
+ # Example for OpenRouter:
162
+ # OPENROUTER_TEAM_MODEL_ID="deepseek/deepseek-r1" # Example, adjust as needed
163
+ # OPENROUTER_AGENT_MODEL_ID="deepseek/deepseek-chat" # Example, adjust as needed
164
+
165
+ # --- External Tools ---
166
+ # Required ONLY if the Researcher agent is used and needs Exa
167
+ EXA_API_KEY="your_exa_api_key"
168
+ ```
169
+
170
+ **Note on Model Selection:**
171
+ - The `TEAM_MODEL_ID` is used by the Coordinator (`Team` object). This role benefits from strong reasoning, synthesis, and delegation capabilities. Consider using a more powerful model (e.g., `deepseek-chat`, `claude-3-opus`, `gpt-4-turbo`) here, potentially balancing capability with cost/speed.
172
+ - The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These handle focused sub-tasks. A faster or more cost-effective model (e.g., `deepseek-chat`, `claude-3-sonnet`, `llama3-8b`) might be suitable, depending on task complexity and budget/performance needs.
173
+ - Defaults are provided in the code (e.g., in `main.py`) if these environment variables are not set. Experimentation is encouraged to find the optimal balance for your use case.
174
+
175
+ 3. **Install Dependencies:**
176
+ It's highly recommended to use a virtual environment.
177
+
178
+ - **Using `uv` (Recommended):**
179
+ ```bash
180
+ # Install uv if you don't have it:
181
+ # curl -LsSf https://astral.sh/uv/install.sh | sh
182
+ # source $HOME/.cargo/env # Or restart your shell
183
+
184
+ # Create and activate a virtual environment (optional but recommended)
185
+ python -m venv .venv
186
+ source .venv/bin/activate # On Windows use `.venv\\Scripts\\activate`
187
+
188
+ # Install dependencies
189
+ uv pip install -r requirements.txt
190
+ # Or if a pyproject.toml exists with dependencies defined:
191
+ # uv pip install .
192
+ ```
193
+ - **Using `pip`:**
194
+ ```bash
195
+ # Create and activate a virtual environment (optional but recommended)
196
+ python -m venv .venv
197
+ source .venv/bin/activate # On Windows use `.venv\\Scripts\\activate`
198
+
199
+ # Install dependencies
200
+ pip install -r requirements.txt
201
+ # Or if a pyproject.toml exists with dependencies defined:
202
+ # pip install .
203
+ ```
204
+
205
+ ## Usage
206
+
207
+ Ensure your environment variables are set and the virtual environment (if used) is active.
208
+
209
+ Run the server. Choose one of the following methods:
210
+
211
+ 1. **Using `uv run` (Recommended):**
212
+ ```bash
213
+ uv --directory /path/to/mcp-server-mas-sequential-thinking run mcp-server-mas-sequential-thinking
214
+ ```
215
+ 2. **Directly using Python:**
216
+
217
+ ```bash
218
+ python main.py
219
+ ```
220
+
221
+ The server will start and listen for requests via stdio, making the `sequentialthinking` tool available to compatible MCP clients configured to use it.
222
+
223
+ ### `sequentialthinking` Tool Parameters
224
+
225
+ The tool expects arguments matching the `ThoughtData` Pydantic model:
226
+
227
+ ```python
228
+ # Simplified representation from src/models.py
229
+ class ThoughtData(BaseModel):
230
+ thought: str # Content of the current thought/step
231
+ thoughtNumber: int # Sequence number (>=1)
232
+ totalThoughts: int # Estimated total steps (>=1, suggest >=5)
233
+ nextThoughtNeeded: bool # Is another step required after this?
234
+ isRevision: bool = False # Is this revising a previous thought?
235
+ revisesThought: Optional[int] = None # If isRevision, which thought number?
236
+ branchFromThought: Optional[int] = None # If branching, from which thought?
237
+ branchId: Optional[str] = None # Unique ID for the new branch being created
238
+ needsMoreThoughts: bool = False # Signal if estimate is too low before last step
239
+ ```
240
+
241
+ ### Interacting with the Tool (Conceptual Example)
242
+
243
+ An LLM would interact with this tool iteratively:
244
+
245
+ 1. **LLM:** Uses a starter prompt (like `sequential-thinking`) with the problem definition.
246
+ 2. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 1`, the initial `thought` (e.g., "Plan the analysis..."), an estimated `totalThoughts`, and `nextThoughtNeeded: True`.
247
+ 3. **Server:** MAS processes the thought. The Coordinator synthesizes responses from specialists and provides guidance (e.g., "Analysis plan complete. Suggest researching X next. No revisions recommended yet.").
248
+ 4. **LLM:** Receives the JSON response containing `coordinatorResponse`.
249
+ 5. **LLM:** Formulates the next thought based on the `coordinatorResponse` (e.g., "Research X using available tools...").
250
+ 6. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 2`, the new `thought`, potentially updated `totalThoughts`, `nextThoughtNeeded: True`.
251
+ 7. **Server:** MAS processes. The Coordinator synthesizes (e.g., "Research complete. Findings suggest a flaw in thought #1's assumption. RECOMMENDATION: Revise thought #1...").
252
+ 8. **LLM:** Receives the response, notes the recommendation.
253
+ 9. **LLM:** Formulates a revision thought.
254
+ 10. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 3`, the revision `thought`, `isRevision: True`, `revisesThought: 1`, `nextThoughtNeeded: True`.
255
+ 11. **... and so on, potentially branching or extending the process as needed.**
256
+
257
+ ### Tool Response Format
258
+
259
+ The tool returns a JSON string containing:
260
+
261
+ ```json
262
+ {
263
+ "processedThoughtNumber": int, // The thought number that was just processed
264
+ "estimatedTotalThoughts": int, // The current estimate of total thoughts
265
+ "nextThoughtNeeded": bool, // Whether the process indicates more steps are needed
266
+ "coordinatorResponse": "...", // Synthesized output from the agent team, including analysis, findings, and guidance for the next step.
267
+ "branches": ["main", "branch-id-1"], // List of active branch IDs
268
+ "thoughtHistoryLength": int, // Total number of thoughts processed so far (across all branches)
269
+ "branchDetails": {
270
+ "currentBranchId": "main", // The ID of the branch the processed thought belongs to
271
+ "branchOriginThought": null | int, // The thought number where the current branch diverged (null for 'main')
272
+ "allBranches": { // Count of thoughts in each active branch
273
+ "main": 5,
274
+ "branch-id-1": 2
275
+ }
276
+ },
277
+ "isRevision": bool, // Was the processed thought a revision?
278
+ "revisesThought": null | int, // Which thought number was revised (if isRevision is true)
279
+ "isBranch": bool, // Did this thought start a new branch?
280
+ "status": "success | validation_error | failed", // Outcome status
281
+ "error": null | "Error message..." // Error details if status is not 'success'
282
+ }
283
+ ```
284
+
285
+ ## Logging
286
+
287
+ - Logs are written to `~/.sequential_thinking/logs/sequential_thinking.log` by default. (Configuration might be adjustable in the logging setup code).
288
+ - Uses Python's standard `logging` module.
289
+ - Includes a rotating file handler (e.g., 10MB limit, 5 backups) and a console handler (typically INFO level).
290
+ - Logs include timestamps, levels, logger names, and messages, including structured representations of thoughts being processed.
291
+
292
+ ## Development
293
+
294
+ 1. **Clone the repository:** (As in Installation)
295
+ ```bash
296
+ git clone git@github.com:FradSer/mcp-server-mas-sequential-thinking.git
297
+ cd mcp-server-mas-sequential-thinking
298
+ ```
299
+ 2. **Set up Virtual Environment:** (Recommended)
300
+ ```bash
301
+ python -m venv .venv
302
+ source .venv/bin/activate # On Windows use `.venv\Scripts\activate`
303
+ ```
304
+ 3. **Install Dependencies (including dev):**
305
+ Ensure your `requirements-dev.txt` or `pyproject.toml` specifies development tools (like `pytest`, `ruff`, `black`, `mypy`).
306
+ ```bash
307
+ # Using uv
308
+ uv pip install -r requirements.txt
309
+ uv pip install -r requirements-dev.txt # Or install extras if defined in pyproject.toml: uv pip install -e ".[dev]"
310
+
311
+ # Using pip
312
+ pip install -r requirements.txt
313
+ pip install -r requirements-dev.txt # Or install extras if defined in pyproject.toml: pip install -e ".[dev]"
314
+ ```
315
+ 4. **Run Checks:**
316
+ Execute linters, formatters, and tests (adjust commands based on your project setup).
317
+ ```bash
318
+ # Example commands (replace with actual commands used in the project)
319
+ ruff check . --fix
320
+ black .
321
+ mypy .
322
+ pytest
323
+ ```
324
+ 5. **Contribution:**
325
+ (Consider adding contribution guidelines: branching strategy, pull request process, code style).
326
+
327
+ ## License
328
+
329
+ MIT
@@ -0,0 +1,5 @@
1
+ main.py,sha256=hfogjWnfhaMv8KY1LDfm4aAeh6OyAVqSwGHyAs4kJt8,47351
2
+ mcp_server_mas_sequential_thinking-0.2.3.dist-info/METADATA,sha256=6SrRvRbzME4BHa4nCeu_9lSSUiFZdA_MnJYTnhkwDO4,19146
3
+ mcp_server_mas_sequential_thinking-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
4
+ mcp_server_mas_sequential_thinking-0.2.3.dist-info/entry_points.txt,sha256=wY2jq_6PmuqyKQzNnL6famc7DXnQiEhVnq3umzNVNiE,64
5
+ mcp_server_mas_sequential_thinking-0.2.3.dist-info/RECORD,,
@@ -1,279 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: mcp-server-mas-sequential-thinking
3
- Version: 0.2.1
4
- Summary: MCP Agent Implementation for Sequential Thinking
5
- Author-email: Frad LEE <fradser@gmail.com>
6
- Requires-Python: >=3.10
7
- Requires-Dist: agno
8
- Requires-Dist: asyncio
9
- Requires-Dist: exa-py
10
- Requires-Dist: groq
11
- Requires-Dist: mcp
12
- Requires-Dist: python-dotenv
13
- Provides-Extra: dev
14
- Requires-Dist: black; extra == 'dev'
15
- Requires-Dist: isort; extra == 'dev'
16
- Requires-Dist: mypy; extra == 'dev'
17
- Requires-Dist: pytest; extra == 'dev'
18
- Description-Content-Type: text/markdown
19
-
20
- # Sequential Thinking Multi-Agent System (MAS) ![](https://img.shields.io/badge/A%20FRAD%20PRODUCT-WIP-yellow)
21
-
22
- [![Twitter Follow](https://img.shields.io/twitter/follow/FradSer?style=social)](https://twitter.com/FradSer) [![Python Version](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![Framework](https://img.shields.io/badge/Framework-Agno-orange.svg)](https://github.com/cognitivecomputations/agno)
23
-
24
- English | [简体中文](README.zh-CN.md)
25
-
26
- This project implements an advanced sequential thinking process using a **Multi-Agent System (MAS)** built with the **Agno** framework and served via **MCP**. It represents a significant evolution from simpler state-tracking approaches, leveraging coordinated specialized agents for deeper analysis and problem decomposition.
27
-
28
- ## Overview
29
-
30
- This server provides a sophisticated `sequentialthinking` tool designed for complex problem-solving. Unlike [its predecessor](https://github.com/modelcontextprotocol/servers/tree/main/src/sequentialthinking), this version utilizes a true Multi-Agent System (MAS) architecture where:
31
-
32
- * **A Coordinating Agent** (the `Team` object in `coordinate` mode) manages the workflow.
33
- * **Specialized Agents** (Planner, Researcher, Analyzer, Critic, Synthesizer) handle specific sub-tasks based on their defined roles and expertise.
34
- * Incoming thoughts are actively **processed, analyzed, and synthesized** by the agent team, not just logged.
35
- * The system supports complex thought patterns including **revisions** of previous steps and **branching** to explore alternative paths.
36
- * Integration with external tools like **Exa** (via the Researcher agent) allows for dynamic information gathering.
37
- * Robust **Pydantic** validation ensures data integrity for thought steps.
38
- * Detailed **logging** tracks the process, including agent interactions (handled by the coordinator).
39
-
40
- The goal is to achieve a higher quality of analysis and a more nuanced thinking process than possible with a single agent or simple state tracking, by harnessing the power of specialized roles working collaboratively.
41
-
42
- ## Key Differences from Original Version (TypeScript)
43
-
44
- This Python/Agno implementation marks a fundamental shift from the original TypeScript version:
45
-
46
- | Feature/Aspect | Python/Agno Version (Current) | TypeScript Version (Original) |
47
- | :------------------ | :------------------------------------------------------------------- | :--------------------------------------------------- |
48
- | **Architecture** | **Multi-Agent System (MAS)**; Active processing by a team of agents. | **Single Class State Tracker**; Simple logging/storing. |
49
- | **Intelligence** | **Distributed Agent Logic**; Embedded in specialized agents & Coordinator. | **External LLM Only**; No internal intelligence. |
50
- | **Processing** | **Active Analysis & Synthesis**; Agents *act* on the thought. | **Passive Logging**; Merely recorded the thought. |
51
- | **Frameworks** | **Agno (MAS) + FastMCP (Server)**; Uses dedicated MAS library. | **MCP SDK only**. |
52
- | **Coordination** | **Explicit Team Coordination Logic** (`Team` in `coordinate` mode). | **None**; No coordination concept. |
53
- | **Validation** | **Pydantic Schema Validation**; Robust data validation. | **Basic Type Checks**; Less reliable. |
54
- | **External Tools** | **Integrated (Exa via Researcher)**; Can perform research tasks. | **None**. |
55
- | **Logging** | **Structured Python Logging (File + Console)**; Configurable. | **Console Logging with Chalk**; Basic. |
56
- | **Language & Ecosystem** | **Python**; Leverages Python AI/ML ecosystem. | **TypeScript/Node.js**. |
57
-
58
- In essence, the system evolved from a passive thought *recorder* to an active thought *processor* powered by a collaborative team of AI agents.
59
-
60
- ## How it Works (Coordinate Mode)
61
-
62
- 1. **Initiation:** An external LLM uses the `sequential-thinking-starter` prompt to define the problem and initiate the process.
63
- 2. **Tool Call:** The LLM calls the `sequentialthinking` tool with the first (or subsequent) thought, structured according to the `ThoughtData` model.
64
- 3. **Validation & Logging:** The tool receives the call, validates the input using Pydantic, logs the incoming thought, and updates the history/branch state via `AppContext`.
65
- 4. **Coordinator Invocation:** The core thought content (with context about revisions/branches) is passed to the `SequentialThinkingTeam`'s `arun` method.
66
- 5. **Coordinator Analysis & Delegation:** The `Team` (acting as Coordinator) analyzes the input thought, breaks it into sub-tasks, and delegates these sub-tasks to the *most relevant* specialist agents (e.g., Analyzer for analysis tasks, Researcher for information needs).
67
- 6. **Specialist Execution:** Delegated agents execute their specific sub-tasks using their instructions, models, and tools (like `ThinkingTools` or `ExaTools`).
68
- 7. **Response Collection:** Specialists return their results to the Coordinator.
69
- 8. **Synthesis & Guidance:** The Coordinator synthesizes the specialists' responses into a single, cohesive output. It may include recommendations for revision or branching based on the specialists' findings (especially the Critic and Analyzer). It also adds guidance for the LLM on formulating the next thought.
70
- 9. **Return Value:** The tool returns a JSON string containing the Coordinator's synthesized response, status, and updated context (branches, history length).
71
- 10. **Iteration:** The calling LLM uses the Coordinator's response and guidance to formulate the next `sequentialthinking` tool call, potentially triggering revisions or branches as suggested.
72
-
73
- ## Token Consumption Warning
74
-
75
- ⚠️ **High Token Usage:** Due to the Multi-Agent System architecture, this tool consumes significantly **more tokens** than single-agent alternatives or the previous TypeScript version. Each `sequentialthinking` call invokes:
76
- * The Coordinator agent (the `Team` itself).
77
- * Multiple specialist agents (potentially Planner, Researcher, Analyzer, Critic, Synthesizer, depending on the Coordinator's delegation).
78
-
79
- This parallel processing leads to substantially higher token usage (potentially 3-6x or more per thought step) compared to single-agent or state-tracking approaches. Budget and plan accordingly. This tool prioritizes **analysis depth and quality** over token efficiency.
80
-
81
- ## Prerequisites
82
-
83
- * Python 3.10+
84
- * Access to a compatible LLM API (configured for `agno`). The system now supports:
85
- * **Groq:** Requires `GROQ_API_KEY`.
86
- * **DeepSeek:** Requires `DEEPSEEK_API_KEY`.
87
- * **OpenRouter:** Requires `OPENROUTER_API_KEY`.
88
- * Configure the desired provider using the `LLM_PROVIDER` environment variable (defaults to `deepseek`).
89
- * Exa API Key (if using the Researcher agent's capabilities)
90
- * `EXA_API_KEY` environment variable.
91
- * `uv` package manager (recommended) or `pip`.
92
-
93
- ## MCP Server Configuration (Client-Side)
94
-
95
- This server runs as a standard executable script that communicates via stdio, as expected by MCP. The exact configuration method depends on your specific MCP client implementation. Consult your client's documentation for details.
96
-
97
- The `env` section should include the API key for your chosen `LLM_PROVIDER`.
98
-
99
- ```json
100
- {
101
- "mcpServers": {
102
- "mas-sequential-thinking": {
103
- "command": "uvx",
104
- "args": [
105
- "mcp-server-mas-sequential-thinking"
106
- ],
107
- "env": {
108
- "LLM_PROVIDER": "deepseek", // Or "groq", "openrouter"
109
- // "GROQ_API_KEY": "your_groq_api_key", // Only if LLM_PROVIDER="groq"
110
- "DEEPSEEK_API_KEY": "your_deepseek_api_key", // Default provider
111
- // "OPENROUTER_API_KEY": "your_openrouter_api_key", // Only if LLM_PROVIDER="openrouter"
112
- "DEEPSEEK_BASE_URL": "your_base_url_if_needed", // Optional: If using a custom endpoint for DeepSeek
113
- "EXA_API_KEY": "your_exa_api_key" // Only if using Exa
114
- }
115
- }
116
- }
117
- }
118
- ```
119
-
120
- ## Installation & Setup
121
-
122
- 1. **Clone the repository:**
123
- ```bash
124
- git clone git@github.com:FradSer/mcp-server-mas-sequential-thinking.git
125
- cd mcp-server-mas-sequential-thinking
126
- ```
127
-
128
- 2. **Set Environment Variables:**
129
- Create a `.env` file in the root directory or export the variables:
130
- ```dotenv
131
- # --- LLM Configuration ---
132
- # Select the LLM provider: "deepseek" (default), "groq", or "openrouter"
133
- LLM_PROVIDER="deepseek"
134
-
135
- # Provide the API key for the chosen provider:
136
- # GROQ_API_KEY="your_groq_api_key"
137
- DEEPSEEK_API_KEY="your_deepseek_api_key"
138
- # OPENROUTER_API_KEY="your_openrouter_api_key"
139
-
140
- # Optional: Base URL override (e.g., for custom DeepSeek endpoints)
141
- DEEPSEEK_BASE_URL="your_base_url_if_needed"
142
-
143
- # Optional: Specify different models for Team Coordinator and Specialist Agents
144
- # Defaults are set within the code based on the provider if these are not set.
145
- # Example for Groq:
146
- # GROQ_TEAM_MODEL_ID="llama3-70b-8192"
147
- # GROQ_AGENT_MODEL_ID="llama3-8b-8192"
148
- # Example for DeepSeek:
149
- # DEEPSEEK_TEAM_MODEL_ID="deepseek-reasoner" # Recommended for coordination
150
- # DEEPSEEK_AGENT_MODEL_ID="deepseek-chat" # Recommended for specialists
151
- # Example for OpenRouter:
152
- # OPENROUTER_TEAM_MODEL_ID="anthropic/claude-3-haiku-20240307"
153
- # OPENROUTER_AGENT_MODEL_ID="google/gemini-flash-1.5"
154
-
155
- # --- External Tools ---
156
- # Required ONLY if the Researcher agent is used and needs Exa
157
- EXA_API_KEY="your_exa_api_key"
158
- ```
159
-
160
- **Note on Model Selection:**
161
-
162
- * The `TEAM_MODEL_ID` is used by the Coordinator (the `Team` object itself). This role requires strong reasoning, synthesis, and delegation capabilities. Using a more powerful model (like `deepseek-reasoner`, `claude-3-opus`, or `gpt-4-turbo`) is often beneficial here, even if it's slower or more expensive.
163
- * The `AGENT_MODEL_ID` is used by the specialist agents (Planner, Researcher, etc.). These agents handle more focused sub-tasks. You might choose a faster or more cost-effective model (like `deepseek-chat`, `claude-3-sonnet`, `llama3-70b`) for specialists, depending on the complexity of the tasks they typically handle and your budget/performance requirements.
164
- * The defaults provided in `main.py` (e.g., `deepseek-reasoner` for agents when using DeepSeek) are starting points. Experimentation is encouraged to find the optimal balance for your specific use case.
165
-
166
- 3. **Install Dependencies:**
167
-
168
- * **Using `uv` (Recommended):**
169
- ```bash
170
- # Install uv if you don't have it:
171
- # curl -LsSf [https://astral.sh/uv/install.sh](https://astral.sh/uv/install.sh) | sh
172
- # source $HOME/.cargo/env # Or restart your shell
173
-
174
- uv pip install -r requirements.txt
175
- # Or if a pyproject.toml exists with dependencies:
176
- # uv pip install .
177
- ```
178
- * **Using `pip`:**
179
- ```bash
180
- pip install -r requirements.txt
181
- # Or if a pyproject.toml exists with dependencies:
182
- # pip install .
183
- ```
184
-
185
- ## Usage
186
-
187
- Run the server script (assuming the main script is named `main.py` or similar based on your file structure):
188
-
189
- ```bash
190
- python your_main_script_name.py
191
- ```
192
-
193
- The server will start and listen for requests via stdio, making the `sequentialthinking` tool available to compatible MCP clients (like certain LLMs or testing frameworks).
194
-
195
- ### `sequentialthinking` Tool Parameters
196
-
197
- The tool expects arguments matching the `ThoughtData` Pydantic model:
198
-
199
- ```python
200
- # Simplified representation
201
- {
202
- "thought": str, # Content of the current thought/step
203
- "thoughtNumber": int, # Sequence number (>=1)
204
- "totalThoughts": int, # Estimated total steps (>=1, suggest >=5)
205
- "nextThoughtNeeded": bool, # Is another step required after this?
206
- "isRevision": bool = False, # Is this revising a previous thought?
207
- "revisesThought": Optional[int] = None, # If isRevision, which thought number?
208
- "branchFromThought": Optional[int] = None, # If branching, from which thought?
209
- "branchId": Optional[str] = None, # Unique ID for the branch
210
- "needsMoreThoughts": bool = False # Signal if estimate is too low before last step
211
- }
212
- ```
213
-
214
- ### Interacting with the Tool (Conceptual Example)
215
-
216
- An LLM would interact with this tool iteratively:
217
-
218
- 1. **LLM:** Uses `sequential-thinking-starter` prompt with the problem.
219
- 2. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 1`, initial `thought` (e.g., "Plan the analysis..."), `totalThoughts` estimate, `nextThoughtNeeded: True`.
220
- 3. **Server:** MAS processes the thought -> Coordinator synthesizes response & provides guidance (e.g., "Analysis plan complete. Suggest researching X next. No revisions recommended yet.").
221
- 4. **LLM:** Receives JSON response containing `coordinatorResponse`.
222
- 5. **LLM:** Formulates the next thought (e.g., "Research X using Exa...") based on the `coordinatorResponse`.
223
- 6. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 2`, the new `thought`, updated `totalThoughts` (if needed), `nextThoughtNeeded: True`.
224
- 7. **Server:** MAS processes -> Coordinator synthesizes (e.g., "Research complete. Findings suggest a flaw in thought #1's assumption. RECOMMENDATION: Revise thought #1...").
225
- 8. **LLM:** Receives response, sees the recommendation.
226
- 9. **LLM:** Formulates a revision thought.
227
- 10. **LLM:** Calls `sequentialthinking` tool with `thoughtNumber: 3`, the revision `thought`, `isRevision: True`, `revisesThought: 1`, `nextThoughtNeeded: True`.
228
- 11. **... and so on, potentially branching or extending as needed.**
229
-
230
- ### Tool Response Format
231
-
232
- The tool returns a JSON string containing:
233
-
234
- ```json
235
- {
236
- "processedThoughtNumber": int,
237
- "estimatedTotalThoughts": int,
238
- "nextThoughtNeeded": bool,
239
- "coordinatorResponse": "Synthesized output from the agent team, including analysis, findings, and guidance for the next step...",
240
- "branches": ["list", "of", "branch", "ids"],
241
- "thoughtHistoryLength": int,
242
- "branchDetails": {
243
- "currentBranchId": "main | branchId",
244
- "branchOriginThought": null | int,
245
- "allBranches": {"main": count, "branchId": count, ...}
246
- },
247
- "isRevision": bool,
248
- "revisesThought": null | int,
249
- "isBranch": bool,
250
- "status": "success | validation_error | failed",
251
- "error": "Error message if status is not success" // Optional
252
- }
253
- ```
254
-
255
- ## Logging
256
-
257
- * Logs are written to `~/.sequential_thinking/logs/sequential_thinking.log`.
258
- * Uses Python's standard `logging` module.
259
- * Includes rotating file handler (10MB limit, 5 backups) and console handler (INFO level).
260
- * Logs include timestamps, levels, logger names, and messages, including formatted thought representations.
261
-
262
- ## Development
263
-
264
- (Add development guidelines here if applicable, e.g., setting up dev environments, running tests, linting.)
265
-
266
- 1. Clone the repository.
267
- 2. Set up a virtual environment.
268
- 3. Install dependencies, potentially including development extras:
269
- ```bash
270
- # Using uv
271
- uv pip install -e ".[dev]"
272
- # Using pip
273
- pip install -e ".[dev]"
274
- ```
275
- 4. Run linters/formatters/tests.
276
-
277
- ## License
278
-
279
- MIT
@@ -1,5 +0,0 @@
1
- main.py,sha256=Vm6SBMDmvFy9CwEmqI-ZqZ0YDLUgPA_E689La_Qc4Yo,44071
2
- mcp_server_mas_sequential_thinking-0.2.1.dist-info/METADATA,sha256=bIJGHFTRT2hldImbnA0xKyp4i8BRcc78WihpJAcMf5A,15807
3
- mcp_server_mas_sequential_thinking-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
4
- mcp_server_mas_sequential_thinking-0.2.1.dist-info/entry_points.txt,sha256=wY2jq_6PmuqyKQzNnL6famc7DXnQiEhVnq3umzNVNiE,64
5
- mcp_server_mas_sequential_thinking-0.2.1.dist-info/RECORD,,