sequential_thinking-0.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
File without changes
@@ -0,0 +1,219 @@
1
+ from collections import Counter
2
+ from typing import Any
3
+ from uuid import UUID
4
+
5
+ from .logging_conf import configure_logging
6
+ from .models import ThoughtData, ThoughtStage
7
+
8
+ logger = configure_logging("sequential-thinking.analysis")
9
+
10
+
11
class ThoughtAnalyzer:
    """Analyzer for thought data to extract insights and patterns."""

    @staticmethod
    def find_related_thoughts(
        current_thought: ThoughtData, all_thoughts: list[ThoughtData], max_results: int = 3
    ) -> list[ThoughtData]:
        """Find thoughts related to the current thought.

        Relatedness is determined first by sharing the same stage, then by
        overlapping tags (more shared tags ranks higher).

        Args:
            current_thought: The current thought to find related thoughts for
            all_thoughts: All available thoughts to search through
            max_results: Maximum number of related thoughts to return

        Returns:
            list[ThoughtData]: Related thoughts, sorted by relevance
        """
        # Thoughts in the same stage are considered most relevant.
        same_stage = [
            t
            for t in all_thoughts
            if t.stage == current_thought.stage and t.id != current_thought.id
        ]

        # Then thoughts sharing tags, ranked by number of matching tags.
        # Initialized once here to avoid redefining the annotated name in
        # both branches of an if/else.
        tag_related: list[ThoughtData] = []
        if current_thought.tags:
            current_tags = set(current_thought.tags)
            tag_matches = [
                (thought, len(current_tags & set(thought.tags)))
                for thought in all_thoughts
                if thought.id != current_thought.id and current_tags & set(thought.tags)
            ]
            # Sort by number of matching tags (descending).
            tag_matches.sort(key=lambda pair: pair[1], reverse=True)
            tag_related = [thought for thought, _ in tag_matches]

        # Combine both lists (same-stage first), deduplicating by id and
        # capping at max_results.
        combined: list[ThoughtData] = []
        seen_ids: set[UUID] = set()
        for thought in same_stage + tag_related:
            if len(combined) >= max_results:
                break
            if thought.id not in seen_ids:
                combined.append(thought)
                seen_ids.add(thought.id)

        return combined

    @staticmethod
    def generate_summary(thoughts: list[ThoughtData]) -> dict[str, Any]:
        """Generate a summary of the thinking process.

        Args:
            thoughts: List of thoughts to summarize

        Returns:
            dict[str, Any]: Summary data, keyed under "summary"
        """
        if not thoughts:
            return {"summary": "No thoughts recorded yet"}

        # Group thoughts by stage name.
        stages: dict[str, list[ThoughtData]] = {}
        for thought in thoughts:
            stages.setdefault(thought.stage.value, []).append(thought)

        # Count tag occurrences across all thoughts; keep the 5 most common.
        tag_counts: Counter[str] = Counter(
            tag for thought in thoughts for tag in thought.tags
        )
        top_tags: list[tuple[str, int]] = tag_counts.most_common(5)

        try:
            # max(..., default=0) already handles an empty sequence, so no
            # extra emptiness guard is needed; the explicit zero check below
            # avoids division by zero.
            max_total = max((t.total_thoughts for t in thoughts), default=0)
            percent_complete = (len(thoughts) / max_total) * 100 if max_total > 0 else 0

            logger.debug(
                f"Calculating completion: {len(thoughts)}/{max_total} = {percent_complete}%"
            )

            # Assemble the final summary.
            summary: dict[str, Any] = {
                "totalThoughts": len(thoughts),
                "stages": {stage: len(items) for stage, items in stages.items()},
                "timeline": [
                    {"number": t.thought_number, "stage": t.stage.value}
                    for t in sorted(thoughts, key=lambda x: x.thought_number)
                ],
                "topTags": [{"tag": tag, "count": count} for tag, count in top_tags],
                "completionStatus": {
                    # True only when every ThoughtStage appears at least once.
                    "hasAllStages": all(stage.value in stages for stage in ThoughtStage),
                    "percentComplete": percent_complete,
                },
            }
        except Exception as e:
            # Summary generation must never crash the caller; fall back to a
            # minimal payload carrying the error text.
            logger.error(f"Error generating summary: {e}")
            summary = {"totalThoughts": len(thoughts), "error": str(e)}

        return {"summary": summary}

    @staticmethod
    def analyze_thought(thought: ThoughtData, all_thoughts: list[ThoughtData]) -> dict[str, Any]:
        """Analyze a single thought in the context of all thoughts.

        Args:
            thought: The thought to analyze
            all_thoughts: All available thoughts for context

        Returns:
            dict[str, Any]: Analysis results
        """
        related_thoughts = ThoughtAnalyzer.find_related_thoughts(thought, all_thoughts)

        # First-in-stage means no thought in the same stage has a lower number.
        same_stage_thoughts = [t for t in all_thoughts if t.stage == thought.stage]
        is_first_in_stage = all(
            t.thought_number >= thought.thought_number for t in same_stage_thoughts
        )

        # total_thoughts is validated to be >= thought_number >= 1, so the
        # division is safe.
        progress = (thought.thought_number / thought.total_thoughts) * 100

        return {
            "thoughtAnalysis": {
                "currentThought": {
                    "thoughtNumber": thought.thought_number,
                    "totalThoughts": thought.total_thoughts,
                    "nextThoughtNeeded": thought.next_thought_needed,
                    "stage": thought.stage.value,
                    "tags": thought.tags,
                    "timestamp": thought.timestamp,
                },
                "analysis": {
                    "relatedThoughtsCount": len(related_thoughts),
                    "relatedThoughtSummaries": [
                        {
                            "thoughtNumber": t.thought_number,
                            "stage": t.stage.value,
                            # Truncate long thought bodies to a 100-char snippet.
                            "snippet": (
                                t.thought[:100] + "..." if len(t.thought) > 100 else t.thought
                            ),
                        }
                        for t in related_thoughts
                    ],
                    "progress": progress,
                    "isFirstInStage": is_first_in_stage,
                },
                "context": {
                    "thoughtHistoryLength": len(all_thoughts),
                    "currentStage": thought.stage.value,
                },
            }
        }
@@ -0,0 +1,22 @@
1
+ import logging
2
+ import sys
3
+
4
+
5
def configure_logging(name: str = "sequential-thinking") -> logging.Logger:
    """Configure and return a logger with standardized settings.

    Configures the root logger (via ``logging.basicConfig``) to emit
    INFO-level records to stderr, then returns the named logger.

    Args:
        name: The name for the logger

    Returns:
        logging.Logger: Configured logger instance
    """
    log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    stderr_handler = logging.StreamHandler(sys.stderr)

    # basicConfig is a no-op once the root logger has handlers, so repeated
    # calls do not stack duplicate handlers.
    logging.basicConfig(level=logging.INFO, format=log_format, handlers=[stderr_handler])

    return logging.getLogger(name)
@@ -0,0 +1,200 @@
1
+ from datetime import datetime
2
+ from enum import Enum
3
+ from typing import Any
4
+ from uuid import UUID, uuid4
5
+
6
+ from pydantic import BaseModel, Field, ValidationInfo, field_validator
7
+
8
+
9
class ThoughtStage(Enum):
    """Basic thinking stages for structured sequential thinking."""

    PROBLEM_DEFINITION = "Problem Definition"
    RESEARCH = "Research"
    ANALYSIS = "Analysis"
    SYNTHESIS = "Synthesis"
    CONCLUSION = "Conclusion"

    @classmethod
    def from_string(cls, value: str) -> "ThoughtStage":
        """Convert a string to a thinking stage.

        Matching is case-insensitive (via ``str.casefold``).

        Args:
            value: The string representation of the thinking stage

        Returns:
            ThoughtStage: The corresponding ThoughtStage enum value

        Raises:
            ValueError: If the string does not match any valid thinking stage
        """
        needle = value.casefold()
        matched = next((stage for stage in cls if stage.value.casefold() == needle), None)
        if matched is not None:
            return matched

        # No stage matched: report the full list of valid names.
        valid_stages = ", ".join(stage.value for stage in cls)
        raise ValueError(f"Invalid thinking stage: '{value}'. Valid stages are: {valid_stages}")
39
+
40
+
41
class ThoughtData(BaseModel):
    """Data structure for a single thought in the sequential thinking process."""

    thought: str
    thought_number: int
    total_thoughts: int
    next_thought_needed: bool
    stage: ThoughtStage
    tags: list[str] = Field(default_factory=list)
    axioms_used: list[str] = Field(default_factory=list)
    assumptions_challenged: list[str] = Field(default_factory=list)
    # NOTE(review): naive local-time ISO string — confirm callers do not
    # need timezone-aware timestamps.
    timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())
    id: UUID = Field(default_factory=uuid4)

    model_config = {"arbitrary_types_allowed": True}

    def __hash__(self) -> int:
        """Make ThoughtData hashable based on its ID."""
        return hash(self.id)

    def __eq__(self, other: object) -> bool:
        """Compare ThoughtData objects based on their ID."""
        if not isinstance(other, ThoughtData):
            return False
        return self.id == other.id

    @field_validator("thought")
    def thought_not_empty(cls, v: str) -> str:
        """Validate that thought content is not empty or whitespace-only."""
        if not v or not v.strip():
            raise ValueError("Thought content cannot be empty")
        return v

    @field_validator("thought_number")
    def thought_number_positive(cls, v: int) -> int:
        """Validate that thought number is positive (1-based)."""
        if v < 1:
            raise ValueError("Thought number must be positive")
        return v

    @field_validator("total_thoughts")
    def total_thoughts_valid(cls, v: int, info: ValidationInfo) -> int:
        """Validate that total thoughts is >= the current thought number."""
        thought_number = info.data.get("thought_number")
        if thought_number is not None and v < thought_number:
            raise ValueError("Total thoughts must be greater or equal to current thought number")
        return v

    def validate(self) -> bool:  # pyright: ignore[reportIncompatibleMethodOverride]
        """Legacy validation method for backward compatibility.

        Returns:
            bool: True if the thought data is valid

        Raises:
            ValueError: If any validation checks fail
        """
        # Validation is now handled by Pydantic automatically at construction.
        return True

    def to_dict(self, include_id: bool = False) -> dict[str, Any]:
        """Convert the thought data to a camelCase dictionary representation.

        Args:
            include_id: Whether to include the ID in the dictionary
                representation. Default is False to maintain compatibility
                with tests.

        Returns:
            dict: Dictionary representation of the thought data
        """
        # Build the camelCase payload directly; the previous implementation
        # ran a generic snake->camel conversion and then overwrote every key
        # with these same explicit assignments, so the loop was dead work.
        result: dict[str, Any] = {
            "thought": self.thought,
            "thoughtNumber": self.thought_number,
            "totalThoughts": self.total_thoughts,
            "nextThoughtNeeded": self.next_thought_needed,
            "stage": self.stage.value,
            "tags": self.tags,
            "axiomsUsed": self.axioms_used,
            "assumptionsChallenged": self.assumptions_challenged,
            "timestamp": self.timestamp,
        }
        if include_id:
            # Convert the UUID to a string for JSON serialization.
            result["id"] = str(self.id)
        return result

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "ThoughtData":
        """Create a ThoughtData instance from a dictionary.

        Accepts camelCase keys (as produced by ``to_dict``) and
        snake-case-identical keys interchangeably.

        Args:
            data: Dictionary containing thought data

        Returns:
            ThoughtData: A new ThoughtData instance

        Raises:
            ValueError: If "stage" is present but not a string, or if the
                resulting data fails model validation
        """
        mappings = {
            "thoughtNumber": "thought_number",
            "totalThoughts": "total_thoughts",
            "nextThoughtNeeded": "next_thought_needed",
            "axiomsUsed": "axioms_used",
            "assumptionsChallenged": "assumptions_challenged",
        }

        # Translate known camelCase keys to their snake_case field names.
        snake_data: dict[str, Any] = {}
        for camel_key, snake_key in mappings.items():
            if camel_key in data:
                snake_data[snake_key] = data[camel_key]

        # Fields whose names are identical in both conventions.
        for key in ["thought", "tags", "timestamp"]:
            if key in data:
                snake_data[key] = data[key]

        # Stage arrives as a display string; parse it case-insensitively.
        if "stage" in data:
            stage_value = data["stage"]
            if not isinstance(stage_value, str):
                raise ValueError("Stage must be a string")
            snake_data["stage"] = ThoughtStage.from_string(stage_value)

        # Preserve an existing ID when possible. Fix: a UUID instance is now
        # kept as-is (previously it was silently replaced with a new uuid4()).
        if "id" in data:
            id_value = data["id"]
            if isinstance(id_value, UUID):
                snake_data["id"] = id_value
            else:
                try:
                    snake_data["id"] = UUID(str(id_value))
                except (ValueError, TypeError):
                    snake_data["id"] = uuid4()

        # Missing optional fields fall back to the model's Field defaults,
        # which match the setdefault values the old code applied by hand.
        return cls(**snake_data)
@@ -0,0 +1,213 @@
1
+ import os
2
+ import sys
3
+ from typing import Any
4
+
5
+ from mcp.server.fastmcp import Context, FastMCP
6
+
7
+ # Use absolute imports when running as a script
8
+ try:
9
+ # When installed as a package
10
+ from .analysis import ThoughtAnalyzer
11
+ from .logging_conf import configure_logging
12
+ from .models import ThoughtData, ThoughtStage
13
+ from .storage import ThoughtStorage
14
+ except ImportError:
15
+ # When run directly
16
+ from mcp_sequential_thinking.analysis import ThoughtAnalyzer
17
+ from mcp_sequential_thinking.logging_conf import configure_logging
18
+ from mcp_sequential_thinking.models import ThoughtData, ThoughtStage
19
+ from mcp_sequential_thinking.storage import ThoughtStorage
20
+
21
+ logger = configure_logging("sequential-thinking.server")
22
+
23
+
24
+ mcp = FastMCP("sequential-thinking")
25
+
26
+ storage_dir = os.environ.get("MCP_STORAGE_DIR", None)
27
+ storage = ThoughtStorage(storage_dir)
28
+
29
+
30
@mcp.tool()  # pyright: ignore[reportUnknownMemberType, reportUntypedFunctionDecorator]
async def process_thought(
    thought: str,
    thought_number: int,
    total_thoughts: int,
    next_thought_needed: bool,
    stage: str,
    tags: list[str] | None = None,
    axioms_used: list[str] | None = None,
    assumptions_challenged: list[str] | None = None,
    ctx: Context[Any, Any, Any] | None = None,
) -> dict[str, Any]:
    """Add a sequential thought with its metadata.

    Args:
        thought: The content of the thought
        thought_number: The sequence number of this thought
        total_thoughts: The total expected thoughts in the sequence
        next_thought_needed: Whether more thoughts are needed after this one
        stage: The thinking stage (Problem Definition, Research, Analysis, Synthesis, Conclusion)
        tags: Optional keywords or categories for the thought
        axioms_used: Optional list of principles or axioms used in this thought
        assumptions_challenged: Optional list of assumptions challenged by this thought
        ctx: Optional MCP context object

    Returns:
        dict: Analysis of the processed thought, or an error payload with
            status "failed" if processing raised
    """
    try:
        # Log the request
        logger.info(f"Processing thought #{thought_number}/{total_thoughts} in stage '{stage}'")

        # Report progress if context is available
        if ctx:
            await ctx.report_progress(thought_number - 1, total_thoughts)

        # Convert stage string to enum; an unknown stage raises ValueError,
        # which is caught below and reported to the caller.
        thought_stage = ThoughtStage.from_string(stage)

        # Fix: None defaults replace the mutable-default-argument lists
        # ([] shared across calls); normalize to fresh lists here.
        thought_data = ThoughtData(
            thought=thought,
            thought_number=thought_number,
            total_thoughts=total_thoughts,
            next_thought_needed=next_thought_needed,
            stage=thought_stage,
            tags=list(tags) if tags else [],
            axioms_used=list(axioms_used) if axioms_used else [],
            assumptions_challenged=list(assumptions_challenged) if assumptions_challenged else [],
        )

        # Validate (legacy no-op; Pydantic validates at construction) and store
        thought_data.validate()
        storage.add_thought(thought_data)

        # Analyze the new thought in the context of the full history
        all_thoughts = storage.get_all_thoughts()
        analysis = ThoughtAnalyzer.analyze_thought(thought_data, all_thoughts)

        # Log success
        logger.info(f"Successfully processed thought #{thought_number}")

        return analysis
    except Exception as e:
        logger.error(f"Error processing thought: {e!s}")

        return {"error": str(e), "status": "failed"}
99
+
100
+
101
@mcp.tool()  # pyright: ignore[reportUnknownMemberType, reportUntypedFunctionDecorator]
def generate_summary() -> dict[str, Any]:
    """Generate a summary of the entire thinking process.

    Returns:
        dict: Summary of the thinking process, or an error payload with
            status "failed" if summarization raised
    """
    try:
        logger.info("Generating thinking process summary")
        # Summarize every thought recorded so far.
        return ThoughtAnalyzer.generate_summary(storage.get_all_thoughts())
    except Exception as e:
        logger.error(f"Error generating summary: {e!s}")
        return {"error": str(e), "status": "failed"}
119
+
120
+
121
@mcp.tool()  # pyright: ignore[reportUnknownMemberType, reportUntypedFunctionDecorator]
def clear_history() -> dict[str, Any]:
    """Clear the thought history.

    Returns:
        dict: Status message ("success" on completion, "failed" on error)
    """
    try:
        logger.info("Clearing thought history")
        storage.clear_history()
    except Exception as e:
        logger.error(f"Error clearing history: {e!s}")
        return {"error": str(e), "status": "failed"}
    return {"status": "success", "message": "Thought history cleared"}
135
+
136
+
137
@mcp.tool()  # pyright: ignore[reportUnknownMemberType, reportUntypedFunctionDecorator]
def export_session(file_path: str) -> dict[str, Any]:
    """Export the current thinking session to a file.

    Args:
        file_path: Path to save the exported session

    Returns:
        dict: Status message ("success" on completion, "failed" on error)
    """
    try:
        logger.info(f"Exporting session to {file_path}")
        storage.export_session(file_path)
    except Exception as e:
        logger.error(f"Error exporting session: {e!s}")
        return {"error": str(e), "status": "failed"}
    return {"status": "success", "message": f"Session exported to {file_path}"}
154
+
155
+
156
@mcp.tool()  # pyright: ignore[reportUnknownMemberType, reportUntypedFunctionDecorator]
def import_session(file_path: str) -> dict[str, Any]:
    """Import a thinking session from a file.

    Args:
        file_path: Path to the file to import

    Returns:
        dict: Status message ("success" on completion, "failed" on error)
    """
    try:
        logger.info(f"Importing session from {file_path}")
        storage.import_session(file_path)
    except Exception as e:
        logger.error(f"Error importing session: {e!s}")
        return {"error": str(e), "status": "failed"}
    return {"status": "success", "message": f"Session imported from {file_path}"}
173
+
174
+
175
def main() -> None:
    """Entry point for the MCP server.

    Re-wraps stdin/stdout as UTF-8 text streams if needed (MCP transports
    frames over stdio), then runs the server.
    """
    logger.info("Starting Sequential Thinking MCP server")

    # Single local import replaces the duplicated `import io` that appeared
    # inside each branch below.
    import io

    # Ensure UTF-8 encoding for stdin/stdout. Encoding names are compared
    # case-insensitively ("UTF-8" and "utf-8" are the same codec), so an
    # already-correct stream is not needlessly re-wrapped.
    if hasattr(sys.stdout, "buffer") and (sys.stdout.encoding or "").casefold() != "utf-8":
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8", line_buffering=True)
    if hasattr(sys.stdin, "buffer") and (sys.stdin.encoding or "").casefold() != "utf-8":
        sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8", line_buffering=True)

    # Flush stdout to ensure no buffered content remains
    sys.stdout.flush()

    # Run the MCP server (blocks until shutdown)
    mcp.run()
194
+
195
+
196
if __name__ == "__main__":
    # When running the script directly, make the package root importable.
    # os and sys are already imported at module level; the previous
    # redundant re-imports here have been removed.
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    if parent_dir not in sys.path:
        sys.path.insert(0, parent_dir)

    # Print debug information
    logger.info(f"Python version: {sys.version}")
    logger.info(f"Current working directory: {os.getcwd()}")
    logger.info(f"Script directory: {os.path.dirname(os.path.abspath(__file__))}")
    logger.info(f"Parent directory added to path: {parent_dir}")

    # Run the server
    main()