atlas-chat 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250) hide show
  1. atlas/__init__.py +40 -0
  2. atlas/application/__init__.py +7 -0
  3. atlas/application/chat/__init__.py +7 -0
  4. atlas/application/chat/agent/__init__.py +10 -0
  5. atlas/application/chat/agent/act_loop.py +179 -0
  6. atlas/application/chat/agent/factory.py +142 -0
  7. atlas/application/chat/agent/protocols.py +46 -0
  8. atlas/application/chat/agent/react_loop.py +338 -0
  9. atlas/application/chat/agent/think_act_loop.py +171 -0
  10. atlas/application/chat/approval_manager.py +151 -0
  11. atlas/application/chat/elicitation_manager.py +191 -0
  12. atlas/application/chat/events/__init__.py +1 -0
  13. atlas/application/chat/events/agent_event_relay.py +112 -0
  14. atlas/application/chat/modes/__init__.py +1 -0
  15. atlas/application/chat/modes/agent.py +125 -0
  16. atlas/application/chat/modes/plain.py +74 -0
  17. atlas/application/chat/modes/rag.py +81 -0
  18. atlas/application/chat/modes/tools.py +179 -0
  19. atlas/application/chat/orchestrator.py +213 -0
  20. atlas/application/chat/policies/__init__.py +1 -0
  21. atlas/application/chat/policies/tool_authorization.py +99 -0
  22. atlas/application/chat/preprocessors/__init__.py +1 -0
  23. atlas/application/chat/preprocessors/message_builder.py +92 -0
  24. atlas/application/chat/preprocessors/prompt_override_service.py +104 -0
  25. atlas/application/chat/service.py +454 -0
  26. atlas/application/chat/utilities/__init__.py +6 -0
  27. atlas/application/chat/utilities/error_handler.py +367 -0
  28. atlas/application/chat/utilities/event_notifier.py +546 -0
  29. atlas/application/chat/utilities/file_processor.py +613 -0
  30. atlas/application/chat/utilities/tool_executor.py +789 -0
  31. atlas/atlas_chat_cli.py +347 -0
  32. atlas/atlas_client.py +238 -0
  33. atlas/core/__init__.py +0 -0
  34. atlas/core/auth.py +205 -0
  35. atlas/core/authorization_manager.py +27 -0
  36. atlas/core/capabilities.py +123 -0
  37. atlas/core/compliance.py +215 -0
  38. atlas/core/domain_whitelist.py +147 -0
  39. atlas/core/domain_whitelist_middleware.py +82 -0
  40. atlas/core/http_client.py +28 -0
  41. atlas/core/log_sanitizer.py +102 -0
  42. atlas/core/metrics_logger.py +59 -0
  43. atlas/core/middleware.py +131 -0
  44. atlas/core/otel_config.py +242 -0
  45. atlas/core/prompt_risk.py +200 -0
  46. atlas/core/rate_limit.py +0 -0
  47. atlas/core/rate_limit_middleware.py +64 -0
  48. atlas/core/security_headers_middleware.py +51 -0
  49. atlas/domain/__init__.py +37 -0
  50. atlas/domain/chat/__init__.py +1 -0
  51. atlas/domain/chat/dtos.py +85 -0
  52. atlas/domain/errors.py +96 -0
  53. atlas/domain/messages/__init__.py +12 -0
  54. atlas/domain/messages/models.py +160 -0
  55. atlas/domain/rag_mcp_service.py +664 -0
  56. atlas/domain/sessions/__init__.py +7 -0
  57. atlas/domain/sessions/models.py +36 -0
  58. atlas/domain/unified_rag_service.py +371 -0
  59. atlas/infrastructure/__init__.py +10 -0
  60. atlas/infrastructure/app_factory.py +135 -0
  61. atlas/infrastructure/events/__init__.py +1 -0
  62. atlas/infrastructure/events/cli_event_publisher.py +140 -0
  63. atlas/infrastructure/events/websocket_publisher.py +140 -0
  64. atlas/infrastructure/sessions/in_memory_repository.py +56 -0
  65. atlas/infrastructure/transport/__init__.py +7 -0
  66. atlas/infrastructure/transport/websocket_connection_adapter.py +33 -0
  67. atlas/init_cli.py +226 -0
  68. atlas/interfaces/__init__.py +15 -0
  69. atlas/interfaces/events.py +134 -0
  70. atlas/interfaces/llm.py +54 -0
  71. atlas/interfaces/rag.py +40 -0
  72. atlas/interfaces/sessions.py +75 -0
  73. atlas/interfaces/tools.py +57 -0
  74. atlas/interfaces/transport.py +24 -0
  75. atlas/main.py +564 -0
  76. atlas/mcp/api_key_demo/README.md +76 -0
  77. atlas/mcp/api_key_demo/main.py +172 -0
  78. atlas/mcp/api_key_demo/run.sh +56 -0
  79. atlas/mcp/basictable/main.py +147 -0
  80. atlas/mcp/calculator/main.py +149 -0
  81. atlas/mcp/code-executor/execution_engine.py +98 -0
  82. atlas/mcp/code-executor/execution_environment.py +95 -0
  83. atlas/mcp/code-executor/main.py +528 -0
  84. atlas/mcp/code-executor/result_processing.py +276 -0
  85. atlas/mcp/code-executor/script_generation.py +195 -0
  86. atlas/mcp/code-executor/security_checker.py +140 -0
  87. atlas/mcp/corporate_cars/main.py +437 -0
  88. atlas/mcp/csv_reporter/main.py +545 -0
  89. atlas/mcp/duckduckgo/main.py +182 -0
  90. atlas/mcp/elicitation_demo/README.md +171 -0
  91. atlas/mcp/elicitation_demo/main.py +262 -0
  92. atlas/mcp/env-demo/README.md +158 -0
  93. atlas/mcp/env-demo/main.py +199 -0
  94. atlas/mcp/file_size_test/main.py +284 -0
  95. atlas/mcp/filesystem/main.py +348 -0
  96. atlas/mcp/image_demo/main.py +113 -0
  97. atlas/mcp/image_demo/requirements.txt +4 -0
  98. atlas/mcp/logging_demo/README.md +72 -0
  99. atlas/mcp/logging_demo/main.py +103 -0
  100. atlas/mcp/many_tools_demo/main.py +50 -0
  101. atlas/mcp/order_database/__init__.py +0 -0
  102. atlas/mcp/order_database/main.py +369 -0
  103. atlas/mcp/order_database/signal_data.csv +1001 -0
  104. atlas/mcp/pdfbasic/main.py +394 -0
  105. atlas/mcp/pptx_generator/main.py +760 -0
  106. atlas/mcp/pptx_generator/requirements.txt +13 -0
  107. atlas/mcp/pptx_generator/run_test.sh +1 -0
  108. atlas/mcp/pptx_generator/test_pptx_generator_security.py +169 -0
  109. atlas/mcp/progress_demo/main.py +167 -0
  110. atlas/mcp/progress_updates_demo/QUICKSTART.md +273 -0
  111. atlas/mcp/progress_updates_demo/README.md +120 -0
  112. atlas/mcp/progress_updates_demo/main.py +497 -0
  113. atlas/mcp/prompts/main.py +222 -0
  114. atlas/mcp/public_demo/main.py +189 -0
  115. atlas/mcp/sampling_demo/README.md +169 -0
  116. atlas/mcp/sampling_demo/main.py +234 -0
  117. atlas/mcp/thinking/main.py +77 -0
  118. atlas/mcp/tool_planner/main.py +240 -0
  119. atlas/mcp/ui-demo/badmesh.png +0 -0
  120. atlas/mcp/ui-demo/main.py +383 -0
  121. atlas/mcp/ui-demo/templates/button_demo.html +32 -0
  122. atlas/mcp/ui-demo/templates/data_visualization.html +32 -0
  123. atlas/mcp/ui-demo/templates/form_demo.html +28 -0
  124. atlas/mcp/username-override-demo/README.md +320 -0
  125. atlas/mcp/username-override-demo/main.py +308 -0
  126. atlas/modules/__init__.py +0 -0
  127. atlas/modules/config/__init__.py +34 -0
  128. atlas/modules/config/cli.py +231 -0
  129. atlas/modules/config/config_manager.py +1096 -0
  130. atlas/modules/file_storage/__init__.py +22 -0
  131. atlas/modules/file_storage/cli.py +330 -0
  132. atlas/modules/file_storage/content_extractor.py +290 -0
  133. atlas/modules/file_storage/manager.py +295 -0
  134. atlas/modules/file_storage/mock_s3_client.py +402 -0
  135. atlas/modules/file_storage/s3_client.py +417 -0
  136. atlas/modules/llm/__init__.py +19 -0
  137. atlas/modules/llm/caller.py +287 -0
  138. atlas/modules/llm/litellm_caller.py +675 -0
  139. atlas/modules/llm/models.py +19 -0
  140. atlas/modules/mcp_tools/__init__.py +17 -0
  141. atlas/modules/mcp_tools/client.py +2123 -0
  142. atlas/modules/mcp_tools/token_storage.py +556 -0
  143. atlas/modules/prompts/prompt_provider.py +130 -0
  144. atlas/modules/rag/__init__.py +24 -0
  145. atlas/modules/rag/atlas_rag_client.py +336 -0
  146. atlas/modules/rag/client.py +129 -0
  147. atlas/routes/admin_routes.py +865 -0
  148. atlas/routes/config_routes.py +484 -0
  149. atlas/routes/feedback_routes.py +361 -0
  150. atlas/routes/files_routes.py +274 -0
  151. atlas/routes/health_routes.py +40 -0
  152. atlas/routes/mcp_auth_routes.py +223 -0
  153. atlas/server_cli.py +164 -0
  154. atlas/tests/conftest.py +20 -0
  155. atlas/tests/integration/test_mcp_auth_integration.py +152 -0
  156. atlas/tests/manual_test_sampling.py +87 -0
  157. atlas/tests/modules/mcp_tools/test_client_auth.py +226 -0
  158. atlas/tests/modules/mcp_tools/test_client_env.py +191 -0
  159. atlas/tests/test_admin_mcp_server_management_routes.py +141 -0
  160. atlas/tests/test_agent_roa.py +135 -0
  161. atlas/tests/test_app_factory_smoke.py +47 -0
  162. atlas/tests/test_approval_manager.py +439 -0
  163. atlas/tests/test_atlas_client.py +188 -0
  164. atlas/tests/test_atlas_rag_client.py +447 -0
  165. atlas/tests/test_atlas_rag_integration.py +224 -0
  166. atlas/tests/test_attach_file_flow.py +287 -0
  167. atlas/tests/test_auth_utils.py +165 -0
  168. atlas/tests/test_backend_public_url.py +185 -0
  169. atlas/tests/test_banner_logging.py +287 -0
  170. atlas/tests/test_capability_tokens_and_injection.py +203 -0
  171. atlas/tests/test_compliance_level.py +54 -0
  172. atlas/tests/test_compliance_manager.py +253 -0
  173. atlas/tests/test_config_manager.py +617 -0
  174. atlas/tests/test_config_manager_paths.py +12 -0
  175. atlas/tests/test_core_auth.py +18 -0
  176. atlas/tests/test_core_utils.py +190 -0
  177. atlas/tests/test_docker_env_sync.py +202 -0
  178. atlas/tests/test_domain_errors.py +329 -0
  179. atlas/tests/test_domain_whitelist.py +359 -0
  180. atlas/tests/test_elicitation_manager.py +408 -0
  181. atlas/tests/test_elicitation_routing.py +296 -0
  182. atlas/tests/test_env_demo_server.py +88 -0
  183. atlas/tests/test_error_classification.py +113 -0
  184. atlas/tests/test_error_flow_integration.py +116 -0
  185. atlas/tests/test_feedback_routes.py +333 -0
  186. atlas/tests/test_file_content_extraction.py +1134 -0
  187. atlas/tests/test_file_extraction_routes.py +158 -0
  188. atlas/tests/test_file_library.py +107 -0
  189. atlas/tests/test_file_manager_unit.py +18 -0
  190. atlas/tests/test_health_route.py +49 -0
  191. atlas/tests/test_http_client_stub.py +8 -0
  192. atlas/tests/test_imports_smoke.py +30 -0
  193. atlas/tests/test_interfaces_llm_response.py +9 -0
  194. atlas/tests/test_issue_access_denied_fix.py +136 -0
  195. atlas/tests/test_llm_env_expansion.py +836 -0
  196. atlas/tests/test_log_level_sensitive_data.py +285 -0
  197. atlas/tests/test_mcp_auth_routes.py +341 -0
  198. atlas/tests/test_mcp_client_auth.py +331 -0
  199. atlas/tests/test_mcp_data_injection.py +270 -0
  200. atlas/tests/test_mcp_get_authorized_servers.py +95 -0
  201. atlas/tests/test_mcp_hot_reload.py +512 -0
  202. atlas/tests/test_mcp_image_content.py +424 -0
  203. atlas/tests/test_mcp_logging.py +172 -0
  204. atlas/tests/test_mcp_progress_updates.py +313 -0
  205. atlas/tests/test_mcp_prompt_override_system_prompt.py +102 -0
  206. atlas/tests/test_mcp_prompts_server.py +39 -0
  207. atlas/tests/test_mcp_tool_result_parsing.py +296 -0
  208. atlas/tests/test_metrics_logger.py +56 -0
  209. atlas/tests/test_middleware_auth.py +379 -0
  210. atlas/tests/test_prompt_risk_and_acl.py +141 -0
  211. atlas/tests/test_rag_mcp_aggregator.py +204 -0
  212. atlas/tests/test_rag_mcp_service.py +224 -0
  213. atlas/tests/test_rate_limit_middleware.py +45 -0
  214. atlas/tests/test_routes_config_smoke.py +60 -0
  215. atlas/tests/test_routes_files_download_token.py +41 -0
  216. atlas/tests/test_routes_files_health.py +18 -0
  217. atlas/tests/test_runtime_imports.py +53 -0
  218. atlas/tests/test_sampling_integration.py +482 -0
  219. atlas/tests/test_security_admin_routes.py +61 -0
  220. atlas/tests/test_security_capability_tokens.py +65 -0
  221. atlas/tests/test_security_file_stats_scope.py +21 -0
  222. atlas/tests/test_security_header_injection.py +191 -0
  223. atlas/tests/test_security_headers_and_filename.py +63 -0
  224. atlas/tests/test_shared_session_repository.py +101 -0
  225. atlas/tests/test_system_prompt_loading.py +181 -0
  226. atlas/tests/test_token_storage.py +505 -0
  227. atlas/tests/test_tool_approval_config.py +93 -0
  228. atlas/tests/test_tool_approval_utils.py +356 -0
  229. atlas/tests/test_tool_authorization_group_filtering.py +223 -0
  230. atlas/tests/test_tool_details_in_config.py +108 -0
  231. atlas/tests/test_tool_planner.py +300 -0
  232. atlas/tests/test_unified_rag_service.py +398 -0
  233. atlas/tests/test_username_override_in_approval.py +258 -0
  234. atlas/tests/test_websocket_auth_header.py +168 -0
  235. atlas/version.py +6 -0
  236. atlas_chat-0.1.0.data/data/.env.example +253 -0
  237. atlas_chat-0.1.0.data/data/config/defaults/compliance-levels.json +44 -0
  238. atlas_chat-0.1.0.data/data/config/defaults/domain-whitelist.json +123 -0
  239. atlas_chat-0.1.0.data/data/config/defaults/file-extractors.json +74 -0
  240. atlas_chat-0.1.0.data/data/config/defaults/help-config.json +198 -0
  241. atlas_chat-0.1.0.data/data/config/defaults/llmconfig-buggy.yml +11 -0
  242. atlas_chat-0.1.0.data/data/config/defaults/llmconfig.yml +19 -0
  243. atlas_chat-0.1.0.data/data/config/defaults/mcp.json +138 -0
  244. atlas_chat-0.1.0.data/data/config/defaults/rag-sources.json +17 -0
  245. atlas_chat-0.1.0.data/data/config/defaults/splash-config.json +16 -0
  246. atlas_chat-0.1.0.dist-info/METADATA +236 -0
  247. atlas_chat-0.1.0.dist-info/RECORD +250 -0
  248. atlas_chat-0.1.0.dist-info/WHEEL +5 -0
  249. atlas_chat-0.1.0.dist-info/entry_points.txt +4 -0
  250. atlas_chat-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,234 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Sampling Demo MCP Server using FastMCP
4
+
5
+ This server demonstrates LLM sampling capabilities - requesting
6
+ LLM text generation from the client during tool execution.
7
+
8
+ Supports:
9
+ - Basic text generation
10
+ - Multi-turn conversations
11
+ - System prompts
12
+ - Temperature control
13
+ - Model preferences
14
+ - Agentic workflows with tool use
15
+ """
16
+
17
+ from fastmcp import Context, FastMCP
18
+
19
+ # Initialize the MCP server
20
+ mcp = FastMCP("Sampling Demo")
21
+
22
+
23
@mcp.tool
async def summarize_text(text: str, ctx: Context) -> str:
    """
    Summarize the provided text using LLM sampling.

    Demonstrates the simplest sampling pattern: a single ctx.sample()
    call with a plain prompt string.

    Args:
        text: The text to summarize

    Returns:
        A summary of the input text
    """
    prompt = f"Please provide a concise summary of the following text:\n\n{text}"
    response = await ctx.sample(prompt)
    if response.text:
        return response.text
    return "Unable to generate summary"
39
+
40
+
41
@mcp.tool
async def analyze_sentiment(text: str, ctx: Context) -> str:
    """
    Analyze the sentiment of the provided text using LLM sampling.

    Demonstrates sampling with a system prompt that casts the LLM in
    the role of a sentiment analyzer.

    Args:
        text: The text to analyze

    Returns:
        Sentiment analysis result
    """
    role = (
        "You are a sentiment analysis expert. Provide clear, "
        "concise sentiment analysis with reasoning."
    )
    response = await ctx.sample(
        messages=f"Analyze the sentiment of this text: {text}",
        system_prompt=role,
        # Low temperature -> more consistent, repeatable analysis.
        temperature=0.3,
    )
    if response.text:
        return response.text
    return "Unable to analyze sentiment"
61
+
62
+
63
@mcp.tool
async def generate_code(description: str, language: str, ctx: Context) -> str:
    """
    Generate code based on a description using LLM sampling.

    Demonstrates sampling with model preferences - hinting which
    models the client should favor for code generation.

    Args:
        description: Description of what the code should do
        language: Programming language to use

    Returns:
        Generated code
    """
    preferred_models = ["gpt-4", "claude-3-sonnet", "gpt-3.5-turbo"]
    response = await ctx.sample(
        messages=f"Generate {language} code that does the following: {description}",
        system_prompt=f"You are an expert {language} programmer. Write clean, well-commented code.",
        temperature=0.7,
        max_tokens=1000,
        model_preferences=preferred_models,  # favor larger models for code
    )
    if response.text:
        return response.text
    return "Unable to generate code"
86
+
87
+
88
@mcp.tool
async def creative_story(prompt: str, ctx: Context) -> str:
    """
    Generate a creative story using LLM sampling with high temperature.

    Demonstrates sampling with a higher temperature to obtain more
    varied, imaginative output.

    Args:
        prompt: Story prompt or theme

    Returns:
        Generated creative story
    """
    response = await ctx.sample(
        messages=f"Write a creative short story based on this prompt: {prompt}",
        system_prompt="You are a creative fiction writer known for imaginative storytelling.",
        temperature=0.9,  # high temperature for creativity
        max_tokens=500,
    )
    if response.text:
        return response.text
    return "Unable to generate story"
109
+
110
+
111
@mcp.tool
async def multi_turn_conversation(topic: str, ctx: Context) -> str:
    """
    Have a multi-turn conversation about a topic using LLM sampling.

    This tool demonstrates multi-turn sampling - building up a conversation
    history and maintaining context across multiple sampling calls.

    Args:
        topic: The topic to discuss

    Returns:
        Summary of the conversation
    """
    from mcp.types import SamplingMessage, TextContent

    # Start with an introduction
    messages = [
        SamplingMessage(
            role="user",
            content=TextContent(type="text", text=f"Let's discuss {topic}. What are the key aspects to consider?")
        )
    ]

    # First sampling: Get initial response
    result1 = await ctx.sample(
        messages=messages,
        system_prompt="You are a knowledgeable assistant engaging in a thoughtful discussion.",
        temperature=0.7
    )
    # Guard against a None completion: TextContent requires a string and
    # the other tools in this server fall back with `or` in the same way.
    first_reply = result1.text or ""

    # Add response to history
    messages.append(
        SamplingMessage(
            role="assistant",
            content=TextContent(type="text", text=first_reply)
        )
    )

    # Second turn: Ask a follow-up question
    messages.append(
        SamplingMessage(
            role="user",
            content=TextContent(type="text", text="That's interesting. Can you elaborate on the most important point?")
        )
    )

    # Second sampling: Get follow-up response
    result2 = await ctx.sample(
        messages=messages,
        temperature=0.7
    )
    second_reply = result2.text or ""

    # Return summary of conversation
    return f"**Discussion on {topic}**\n\n**Initial Response:**\n{first_reply}\n\n**Follow-up:**\n{second_reply}"
166
+
167
+
168
@mcp.tool
async def research_question(question: str, ctx: Context) -> str:
    """
    Research a question using multiple LLM sampling calls.

    Demonstrates agentic behavior: the first sampling call decomposes
    the question into sub-questions, the second synthesizes a full
    answer guided by that breakdown.

    Args:
        question: The research question to answer

    Returns:
        Comprehensive research-based answer
    """
    # Step 1: decompose the question into key sub-questions.
    breakdown = await ctx.sample(
        messages=f"Break down this question into 3 key sub-questions that need to be answered: {question}",
        system_prompt="You are a research assistant. Identify key sub-questions clearly and concisely.",
        temperature=0.5,
        max_tokens=200,
    )

    # Step 2: answer the original question using the breakdown as scaffolding.
    answer = await ctx.sample(
        messages=f"Based on these sub-questions:\n{breakdown.text}\n\nProvide a comprehensive answer to the original question: {question}",
        system_prompt="You are a research expert. Provide well-reasoned, evidence-based answers.",
        temperature=0.6,
        max_tokens=800,
    )

    return (
        f"**Research Question:** {question}\n\n"
        f"**Analysis:**\n{breakdown.text}\n\n"
        f"**Answer:**\n{answer.text}"
    )
199
+
200
+
201
@mcp.tool
async def translate_and_explain(text: str, target_language: str, ctx: Context) -> str:
    """
    Translate text and explain translation choices using LLM sampling.

    Demonstrates sequential sampling for a multi-step task: translate
    first, then explain the choices made.

    Args:
        text: Text to translate
        target_language: Target language for translation

    Returns:
        Translation with explanation
    """
    # Step 1: produce the translation (low temperature for fidelity).
    translation = await ctx.sample(
        messages=f"Translate the following text to {target_language}:\n\n{text}",
        system_prompt=f"You are an expert translator. Provide accurate {target_language} translations.",
        temperature=0.3,
    )

    # Step 2: explain the key decisions behind that translation.
    explanation = await ctx.sample(
        messages=f"Explain the key translation choices made in translating to {target_language}:\n\nOriginal: {text}\n\nTranslation: {translation.text}",
        system_prompt="You are a translation expert. Explain translation decisions clearly.",
        temperature=0.4,
        max_tokens=300,
    )

    return (
        f"**Translation to {target_language}:**\n{translation.text}\n\n"
        f"**Translation Notes:**\n{explanation.text}"
    )
231
+
232
+
233
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
@@ -0,0 +1,77 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Thinking MCP Server using FastMCP
4
+ Provides a thinking tool that processes thoughts and breaks down problems step by step.
5
+ """
6
+
7
+ from typing import Any, Dict, List
8
+
9
+ from fastmcp import FastMCP
10
+
11
+ # Initialize the MCP server
12
+ mcp = FastMCP("Thinking")
13
+
14
+
15
@mcp.tool
def thinking(list_of_thoughts: List[str]) -> Dict[str, Any]:
    """
    Structured thinking and ideation tool for organizing thoughts, brainstorming,
    and problem-solving analysis.

    Collects a list of thoughts, validates the input, and returns them in a
    structured form together with a count. Useful for strategic planning,
    brainstorming sessions, problem decomposition, research idea generation,
    and other iterative thinking workflows; the structured output can feed
    downstream analytical or documentation tools.

    Args:
        list_of_thoughts: Collection of thoughts, ideas, or concepts to
            organize (array of strings)

    Returns:
        Dictionary containing (under the ``results`` key):
        - thoughts: Organized list of all provided thoughts
        - count: Total number of thoughts processed
        Or an error message if no thoughts were provided or processing fails.
    """
    try:
        # Empty (or missing) input is reported as an error, not an exception.
        if not list_of_thoughts:
            return {"results": {"error": "No thoughts provided"}}

        payload = {
            "thoughts": list_of_thoughts,
            "count": len(list_of_thoughts),
        }
        return {"results": payload}

    except Exception as e:
        return {"results": {"error": f"Error processing thoughts: {str(e)}"}}
74
+
75
+
76
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
@@ -0,0 +1,240 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tool Planner MCP Server using FastMCP.
4
+
5
+ Uses _mcp_data injection and MCP sampling to generate bash scripts that
6
+ accomplish a user's task by calling the Atlas CLI with the appropriate tools.
7
+
8
+ Flow:
9
+ 1. User asks the LLM to plan a task (e.g., "create a powerpoint about dogs")
10
+ 2. LLM calls plan_with_tools, passing the user's request as `task`
11
+ 3. Atlas UI injects `_mcp_data` (all available tools metadata) and `username`
12
+ 4. Inside the tool:
13
+ a. Convert `_mcp_data` into an LLM-friendly CLI tool reference
14
+ b. Use ctx.sample() to ask the LLM to write a bash script using atlas_chat_cli.py
15
+ c. Return the generated bash script
16
+ """
17
+
18
+ import base64
19
+ import re
20
+ from typing import Any, Dict, Optional
21
+
22
+ from fastmcp import Context, FastMCP
23
+
24
+ mcp = FastMCP("Tool Planner")
25
+
26
+
27
+ PLANNER_SYSTEM_PROMPT = """\
28
+ You are a task planner. Given a user's request and a list of available CLI tools,
29
+ write a bash script that accomplishes the task using sequential calls to:
30
+
31
+ python atlas_chat_cli.py "instruction" --tools tool_name
32
+
33
+ Rules:
34
+ - Each step is a plain CLI call with a natural language instruction and the right --tools flag
35
+ - Use -o filename.txt to save output to a file when a later step needs it
36
+ - Use loops (for/while) for repetitive operations
37
+ - Include set -e for error checking
38
+ - The script should be self-contained and runnable from the backend/ directory
39
+ - Do NOT use --json unless parsing structured output is truly necessary
40
+ - Keep instructions to the LLM clear and specific in each CLI call
41
+ - Be concise: at most 2 short comment lines between commands, no lengthy explanations
42
+ - The script should be easy to read at a glance
43
+ """
44
+
45
+
46
def format_tools_for_llm(mcp_data: Dict[str, Any]) -> str:
    """Convert _mcp_data into a human-readable CLI tool reference.

    Walks the injected server metadata and renders each server, its
    tools, and their (non-internal) parameters as an indented text
    block suitable for inclusion in an LLM prompt.

    Args:
        mcp_data: The _mcp_data dict injected by Atlas UI, containing
            an ``available_servers`` list.

    Returns:
        A formatted multi-line string describing available tools.
    """
    server_entries = mcp_data.get("available_servers", [])
    if not server_entries:
        return "(No tools available)"

    out: list[str] = []
    for entry in server_entries:
        name = entry.get("server_name", "unknown")
        description = entry.get("description", "")
        suffix = f" ({description})" if description else ""
        out.append(f"Server: {name}{suffix}")

        for tool_entry in entry.get("tools", []):
            out.append(f"  Tool: {tool_entry.get('name', 'unknown')}")
            tool_description = tool_entry.get("description", "")
            if tool_description:
                out.append(f"    Description: {tool_description}")

            schema = tool_entry.get("parameters", {})
            props = schema.get("properties", {})
            required_names = set(schema.get("required", []))

            if props:
                out.append("    Parameters:")
                for pname, pschema in props.items():
                    # Skip injected/internal parameters like _mcp_data.
                    if pname.startswith("_"):
                        continue
                    ptype = pschema.get("type", "any")
                    status = "required" if pname in required_names else "optional"
                    pdesc = pschema.get("description", "")
                    tail = f": {pdesc}" if pdesc else ""
                    out.append(f"      - {pname} ({ptype}, {status}){tail}")

        # Blank separator line after each server section.
        out.append("")

    return "\n".join(out)
97
+
98
+
99
def build_planning_prompt(task: str, tools_reference: str) -> str:
    """Build the user message for the sampling call.

    Combines the task description, CLI usage instructions, and the
    formatted tools reference into a single prompt.

    Args:
        task: The user's task description.
        tools_reference: Output of ``format_tools_for_llm()``.

    Returns:
        The complete user-message string for ctx.sample().
    """
    # NOTE(review): the leading whitespace inside the quoted CLI example
    # lines is part of the prompt text sent to the LLM — keep it stable
    # when editing.
    return (
        f"Task: {task}\n\n"
        f"Available tools:\n{tools_reference}\n\n"
        "CLI usage:\n"
        ' python atlas_chat_cli.py "instruction for the LLM" --tools tool_name\n\n'
        "Capture output to file if needed:\n"
        ' python atlas_chat_cli.py "instruction" --tools tool_name -o result.txt\n\n'
        "Loops:\n"
        ' for item in "a" "b" "c"; do\n'
        ' python atlas_chat_cli.py "Do something with $item" --tools tool_name\n'
        " done\n\n"
        "Write a bash script that accomplishes the task."
    )
125
+
126
+
127
+ def _sanitize_filename(task: str, max_length: int = 40) -> str:
128
+ """Derive a safe filename from the task description."""
129
+ slug = re.sub(r"[^\w\s-]", "", task.lower())
130
+ slug = re.sub(r"[\s_-]+", "_", slug).strip("_")
131
+ return slug[:max_length] if slug else "plan"
132
+
133
+
134
def _build_artifact_response(
    script_text: str, task: str
) -> Dict[str, Any]:
    """Package *script_text* as a downloadable Atlas artifact.

    The response dict carries ``results`` (operation metadata),
    ``artifacts`` (one base64-encoded .sh file) and ``display``
    (canvas hints), matching the convention used by pptx_generator
    and csv_reporter.
    """
    filename = f"{_sanitize_filename(task)}.sh"
    # base64-encode so the script survives JSON transport intact.
    encoded = base64.b64encode(script_text.encode("utf-8")).decode("utf-8")

    results = {
        "operation": "plan_with_tools",
        "task": task,
        "filename": filename,
        "message": "Bash script plan generated.",
    }
    artifact = {
        "name": filename,
        "b64": encoded,
        "mime": "application/x-sh",
        "viewer": "code",
    }
    display = {
        "open_canvas": True,
        "primary_file": filename,
        "mode": "replace",
        "viewer_hint": "code",
    }
    return {"results": results, "artifacts": [artifact], "display": display}
167
+
168
+
169
@mcp.tool
async def plan_with_tools(
    task: str,
    _mcp_data: Optional[Dict[str, Any]] = None,
    username: Optional[str] = None,
    ctx: Context = None,
) -> Dict[str, Any]:
    """Plan how to accomplish a task by generating a bash script that uses the Atlas CLI.

    Metadata about every available MCP tool arrives via _mcp_data
    injection; LLM sampling then produces a runnable bash script whose
    steps each call atlas_chat_cli.py with the appropriate --tools
    flag. The script is returned as a downloadable .sh artifact.

    Args:
        task: Description of the task to accomplish.
        _mcp_data: Automatically injected by Atlas UI with available tool
            metadata. Do not provide this manually.
        username: The authenticated user (automatically injected by Atlas UI).

    Returns:
        Atlas artifact dict with the bash script as a downloadable file.
    """
    data = _mcp_data or {}
    reference = format_tools_for_llm(data)
    prompt = build_planning_prompt(task, reference)

    # No Context means no sampling channel: return a stub script instead.
    if ctx is None:
        stub = (
            f"#!/bin/bash\nset -e\n"
            f"# Sampling unavailable -- cannot generate plan.\n"
            f"# Task: {task}\n"
            f"# Tools reference:\n{reference}"
        )
        return _build_artifact_response(stub, task)

    n_servers = len(data.get("available_servers", []))
    await ctx.report_progress(
        progress=0, total=3,
        message=f"Discovered {n_servers} servers, building prompt...",
    )
    await ctx.report_progress(
        progress=1, total=3,
        message="Asking LLM to generate bash script...",
    )

    sampled = await ctx.sample(
        messages=prompt,
        system_prompt=PLANNER_SYSTEM_PROMPT,
        temperature=0.3,
        max_tokens=10000,
    )

    await ctx.report_progress(
        progress=2, total=3,
        message="Packaging script as downloadable artifact...",
    )

    script = sampled.text or "#!/bin/bash\nset -e\n# Unable to generate plan"
    artifact = _build_artifact_response(script, task)

    await ctx.report_progress(
        progress=3, total=3,
        message="Done.",
    )

    return artifact
237
+
238
+
239
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
Binary file