atlas-chat 0.1.0 (atlas_chat-0.1.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (250)
  1. atlas/__init__.py +40 -0
  2. atlas/application/__init__.py +7 -0
  3. atlas/application/chat/__init__.py +7 -0
  4. atlas/application/chat/agent/__init__.py +10 -0
  5. atlas/application/chat/agent/act_loop.py +179 -0
  6. atlas/application/chat/agent/factory.py +142 -0
  7. atlas/application/chat/agent/protocols.py +46 -0
  8. atlas/application/chat/agent/react_loop.py +338 -0
  9. atlas/application/chat/agent/think_act_loop.py +171 -0
  10. atlas/application/chat/approval_manager.py +151 -0
  11. atlas/application/chat/elicitation_manager.py +191 -0
  12. atlas/application/chat/events/__init__.py +1 -0
  13. atlas/application/chat/events/agent_event_relay.py +112 -0
  14. atlas/application/chat/modes/__init__.py +1 -0
  15. atlas/application/chat/modes/agent.py +125 -0
  16. atlas/application/chat/modes/plain.py +74 -0
  17. atlas/application/chat/modes/rag.py +81 -0
  18. atlas/application/chat/modes/tools.py +179 -0
  19. atlas/application/chat/orchestrator.py +213 -0
  20. atlas/application/chat/policies/__init__.py +1 -0
  21. atlas/application/chat/policies/tool_authorization.py +99 -0
  22. atlas/application/chat/preprocessors/__init__.py +1 -0
  23. atlas/application/chat/preprocessors/message_builder.py +92 -0
  24. atlas/application/chat/preprocessors/prompt_override_service.py +104 -0
  25. atlas/application/chat/service.py +454 -0
  26. atlas/application/chat/utilities/__init__.py +6 -0
  27. atlas/application/chat/utilities/error_handler.py +367 -0
  28. atlas/application/chat/utilities/event_notifier.py +546 -0
  29. atlas/application/chat/utilities/file_processor.py +613 -0
  30. atlas/application/chat/utilities/tool_executor.py +789 -0
  31. atlas/atlas_chat_cli.py +347 -0
  32. atlas/atlas_client.py +238 -0
  33. atlas/core/__init__.py +0 -0
  34. atlas/core/auth.py +205 -0
  35. atlas/core/authorization_manager.py +27 -0
  36. atlas/core/capabilities.py +123 -0
  37. atlas/core/compliance.py +215 -0
  38. atlas/core/domain_whitelist.py +147 -0
  39. atlas/core/domain_whitelist_middleware.py +82 -0
  40. atlas/core/http_client.py +28 -0
  41. atlas/core/log_sanitizer.py +102 -0
  42. atlas/core/metrics_logger.py +59 -0
  43. atlas/core/middleware.py +131 -0
  44. atlas/core/otel_config.py +242 -0
  45. atlas/core/prompt_risk.py +200 -0
  46. atlas/core/rate_limit.py +0 -0
  47. atlas/core/rate_limit_middleware.py +64 -0
  48. atlas/core/security_headers_middleware.py +51 -0
  49. atlas/domain/__init__.py +37 -0
  50. atlas/domain/chat/__init__.py +1 -0
  51. atlas/domain/chat/dtos.py +85 -0
  52. atlas/domain/errors.py +96 -0
  53. atlas/domain/messages/__init__.py +12 -0
  54. atlas/domain/messages/models.py +160 -0
  55. atlas/domain/rag_mcp_service.py +664 -0
  56. atlas/domain/sessions/__init__.py +7 -0
  57. atlas/domain/sessions/models.py +36 -0
  58. atlas/domain/unified_rag_service.py +371 -0
  59. atlas/infrastructure/__init__.py +10 -0
  60. atlas/infrastructure/app_factory.py +135 -0
  61. atlas/infrastructure/events/__init__.py +1 -0
  62. atlas/infrastructure/events/cli_event_publisher.py +140 -0
  63. atlas/infrastructure/events/websocket_publisher.py +140 -0
  64. atlas/infrastructure/sessions/in_memory_repository.py +56 -0
  65. atlas/infrastructure/transport/__init__.py +7 -0
  66. atlas/infrastructure/transport/websocket_connection_adapter.py +33 -0
  67. atlas/init_cli.py +226 -0
  68. atlas/interfaces/__init__.py +15 -0
  69. atlas/interfaces/events.py +134 -0
  70. atlas/interfaces/llm.py +54 -0
  71. atlas/interfaces/rag.py +40 -0
  72. atlas/interfaces/sessions.py +75 -0
  73. atlas/interfaces/tools.py +57 -0
  74. atlas/interfaces/transport.py +24 -0
  75. atlas/main.py +564 -0
  76. atlas/mcp/api_key_demo/README.md +76 -0
  77. atlas/mcp/api_key_demo/main.py +172 -0
  78. atlas/mcp/api_key_demo/run.sh +56 -0
  79. atlas/mcp/basictable/main.py +147 -0
  80. atlas/mcp/calculator/main.py +149 -0
  81. atlas/mcp/code-executor/execution_engine.py +98 -0
  82. atlas/mcp/code-executor/execution_environment.py +95 -0
  83. atlas/mcp/code-executor/main.py +528 -0
  84. atlas/mcp/code-executor/result_processing.py +276 -0
  85. atlas/mcp/code-executor/script_generation.py +195 -0
  86. atlas/mcp/code-executor/security_checker.py +140 -0
  87. atlas/mcp/corporate_cars/main.py +437 -0
  88. atlas/mcp/csv_reporter/main.py +545 -0
  89. atlas/mcp/duckduckgo/main.py +182 -0
  90. atlas/mcp/elicitation_demo/README.md +171 -0
  91. atlas/mcp/elicitation_demo/main.py +262 -0
  92. atlas/mcp/env-demo/README.md +158 -0
  93. atlas/mcp/env-demo/main.py +199 -0
  94. atlas/mcp/file_size_test/main.py +284 -0
  95. atlas/mcp/filesystem/main.py +348 -0
  96. atlas/mcp/image_demo/main.py +113 -0
  97. atlas/mcp/image_demo/requirements.txt +4 -0
  98. atlas/mcp/logging_demo/README.md +72 -0
  99. atlas/mcp/logging_demo/main.py +103 -0
  100. atlas/mcp/many_tools_demo/main.py +50 -0
  101. atlas/mcp/order_database/__init__.py +0 -0
  102. atlas/mcp/order_database/main.py +369 -0
  103. atlas/mcp/order_database/signal_data.csv +1001 -0
  104. atlas/mcp/pdfbasic/main.py +394 -0
  105. atlas/mcp/pptx_generator/main.py +760 -0
  106. atlas/mcp/pptx_generator/requirements.txt +13 -0
  107. atlas/mcp/pptx_generator/run_test.sh +1 -0
  108. atlas/mcp/pptx_generator/test_pptx_generator_security.py +169 -0
  109. atlas/mcp/progress_demo/main.py +167 -0
  110. atlas/mcp/progress_updates_demo/QUICKSTART.md +273 -0
  111. atlas/mcp/progress_updates_demo/README.md +120 -0
  112. atlas/mcp/progress_updates_demo/main.py +497 -0
  113. atlas/mcp/prompts/main.py +222 -0
  114. atlas/mcp/public_demo/main.py +189 -0
  115. atlas/mcp/sampling_demo/README.md +169 -0
  116. atlas/mcp/sampling_demo/main.py +234 -0
  117. atlas/mcp/thinking/main.py +77 -0
  118. atlas/mcp/tool_planner/main.py +240 -0
  119. atlas/mcp/ui-demo/badmesh.png +0 -0
  120. atlas/mcp/ui-demo/main.py +383 -0
  121. atlas/mcp/ui-demo/templates/button_demo.html +32 -0
  122. atlas/mcp/ui-demo/templates/data_visualization.html +32 -0
  123. atlas/mcp/ui-demo/templates/form_demo.html +28 -0
  124. atlas/mcp/username-override-demo/README.md +320 -0
  125. atlas/mcp/username-override-demo/main.py +308 -0
  126. atlas/modules/__init__.py +0 -0
  127. atlas/modules/config/__init__.py +34 -0
  128. atlas/modules/config/cli.py +231 -0
  129. atlas/modules/config/config_manager.py +1096 -0
  130. atlas/modules/file_storage/__init__.py +22 -0
  131. atlas/modules/file_storage/cli.py +330 -0
  132. atlas/modules/file_storage/content_extractor.py +290 -0
  133. atlas/modules/file_storage/manager.py +295 -0
  134. atlas/modules/file_storage/mock_s3_client.py +402 -0
  135. atlas/modules/file_storage/s3_client.py +417 -0
  136. atlas/modules/llm/__init__.py +19 -0
  137. atlas/modules/llm/caller.py +287 -0
  138. atlas/modules/llm/litellm_caller.py +675 -0
  139. atlas/modules/llm/models.py +19 -0
  140. atlas/modules/mcp_tools/__init__.py +17 -0
  141. atlas/modules/mcp_tools/client.py +2123 -0
  142. atlas/modules/mcp_tools/token_storage.py +556 -0
  143. atlas/modules/prompts/prompt_provider.py +130 -0
  144. atlas/modules/rag/__init__.py +24 -0
  145. atlas/modules/rag/atlas_rag_client.py +336 -0
  146. atlas/modules/rag/client.py +129 -0
  147. atlas/routes/admin_routes.py +865 -0
  148. atlas/routes/config_routes.py +484 -0
  149. atlas/routes/feedback_routes.py +361 -0
  150. atlas/routes/files_routes.py +274 -0
  151. atlas/routes/health_routes.py +40 -0
  152. atlas/routes/mcp_auth_routes.py +223 -0
  153. atlas/server_cli.py +164 -0
  154. atlas/tests/conftest.py +20 -0
  155. atlas/tests/integration/test_mcp_auth_integration.py +152 -0
  156. atlas/tests/manual_test_sampling.py +87 -0
  157. atlas/tests/modules/mcp_tools/test_client_auth.py +226 -0
  158. atlas/tests/modules/mcp_tools/test_client_env.py +191 -0
  159. atlas/tests/test_admin_mcp_server_management_routes.py +141 -0
  160. atlas/tests/test_agent_roa.py +135 -0
  161. atlas/tests/test_app_factory_smoke.py +47 -0
  162. atlas/tests/test_approval_manager.py +439 -0
  163. atlas/tests/test_atlas_client.py +188 -0
  164. atlas/tests/test_atlas_rag_client.py +447 -0
  165. atlas/tests/test_atlas_rag_integration.py +224 -0
  166. atlas/tests/test_attach_file_flow.py +287 -0
  167. atlas/tests/test_auth_utils.py +165 -0
  168. atlas/tests/test_backend_public_url.py +185 -0
  169. atlas/tests/test_banner_logging.py +287 -0
  170. atlas/tests/test_capability_tokens_and_injection.py +203 -0
  171. atlas/tests/test_compliance_level.py +54 -0
  172. atlas/tests/test_compliance_manager.py +253 -0
  173. atlas/tests/test_config_manager.py +617 -0
  174. atlas/tests/test_config_manager_paths.py +12 -0
  175. atlas/tests/test_core_auth.py +18 -0
  176. atlas/tests/test_core_utils.py +190 -0
  177. atlas/tests/test_docker_env_sync.py +202 -0
  178. atlas/tests/test_domain_errors.py +329 -0
  179. atlas/tests/test_domain_whitelist.py +359 -0
  180. atlas/tests/test_elicitation_manager.py +408 -0
  181. atlas/tests/test_elicitation_routing.py +296 -0
  182. atlas/tests/test_env_demo_server.py +88 -0
  183. atlas/tests/test_error_classification.py +113 -0
  184. atlas/tests/test_error_flow_integration.py +116 -0
  185. atlas/tests/test_feedback_routes.py +333 -0
  186. atlas/tests/test_file_content_extraction.py +1134 -0
  187. atlas/tests/test_file_extraction_routes.py +158 -0
  188. atlas/tests/test_file_library.py +107 -0
  189. atlas/tests/test_file_manager_unit.py +18 -0
  190. atlas/tests/test_health_route.py +49 -0
  191. atlas/tests/test_http_client_stub.py +8 -0
  192. atlas/tests/test_imports_smoke.py +30 -0
  193. atlas/tests/test_interfaces_llm_response.py +9 -0
  194. atlas/tests/test_issue_access_denied_fix.py +136 -0
  195. atlas/tests/test_llm_env_expansion.py +836 -0
  196. atlas/tests/test_log_level_sensitive_data.py +285 -0
  197. atlas/tests/test_mcp_auth_routes.py +341 -0
  198. atlas/tests/test_mcp_client_auth.py +331 -0
  199. atlas/tests/test_mcp_data_injection.py +270 -0
  200. atlas/tests/test_mcp_get_authorized_servers.py +95 -0
  201. atlas/tests/test_mcp_hot_reload.py +512 -0
  202. atlas/tests/test_mcp_image_content.py +424 -0
  203. atlas/tests/test_mcp_logging.py +172 -0
  204. atlas/tests/test_mcp_progress_updates.py +313 -0
  205. atlas/tests/test_mcp_prompt_override_system_prompt.py +102 -0
  206. atlas/tests/test_mcp_prompts_server.py +39 -0
  207. atlas/tests/test_mcp_tool_result_parsing.py +296 -0
  208. atlas/tests/test_metrics_logger.py +56 -0
  209. atlas/tests/test_middleware_auth.py +379 -0
  210. atlas/tests/test_prompt_risk_and_acl.py +141 -0
  211. atlas/tests/test_rag_mcp_aggregator.py +204 -0
  212. atlas/tests/test_rag_mcp_service.py +224 -0
  213. atlas/tests/test_rate_limit_middleware.py +45 -0
  214. atlas/tests/test_routes_config_smoke.py +60 -0
  215. atlas/tests/test_routes_files_download_token.py +41 -0
  216. atlas/tests/test_routes_files_health.py +18 -0
  217. atlas/tests/test_runtime_imports.py +53 -0
  218. atlas/tests/test_sampling_integration.py +482 -0
  219. atlas/tests/test_security_admin_routes.py +61 -0
  220. atlas/tests/test_security_capability_tokens.py +65 -0
  221. atlas/tests/test_security_file_stats_scope.py +21 -0
  222. atlas/tests/test_security_header_injection.py +191 -0
  223. atlas/tests/test_security_headers_and_filename.py +63 -0
  224. atlas/tests/test_shared_session_repository.py +101 -0
  225. atlas/tests/test_system_prompt_loading.py +181 -0
  226. atlas/tests/test_token_storage.py +505 -0
  227. atlas/tests/test_tool_approval_config.py +93 -0
  228. atlas/tests/test_tool_approval_utils.py +356 -0
  229. atlas/tests/test_tool_authorization_group_filtering.py +223 -0
  230. atlas/tests/test_tool_details_in_config.py +108 -0
  231. atlas/tests/test_tool_planner.py +300 -0
  232. atlas/tests/test_unified_rag_service.py +398 -0
  233. atlas/tests/test_username_override_in_approval.py +258 -0
  234. atlas/tests/test_websocket_auth_header.py +168 -0
  235. atlas/version.py +6 -0
  236. atlas_chat-0.1.0.data/data/.env.example +253 -0
  237. atlas_chat-0.1.0.data/data/config/defaults/compliance-levels.json +44 -0
  238. atlas_chat-0.1.0.data/data/config/defaults/domain-whitelist.json +123 -0
  239. atlas_chat-0.1.0.data/data/config/defaults/file-extractors.json +74 -0
  240. atlas_chat-0.1.0.data/data/config/defaults/help-config.json +198 -0
  241. atlas_chat-0.1.0.data/data/config/defaults/llmconfig-buggy.yml +11 -0
  242. atlas_chat-0.1.0.data/data/config/defaults/llmconfig.yml +19 -0
  243. atlas_chat-0.1.0.data/data/config/defaults/mcp.json +138 -0
  244. atlas_chat-0.1.0.data/data/config/defaults/rag-sources.json +17 -0
  245. atlas_chat-0.1.0.data/data/config/defaults/splash-config.json +16 -0
  246. atlas_chat-0.1.0.dist-info/METADATA +236 -0
  247. atlas_chat-0.1.0.dist-info/RECORD +250 -0
  248. atlas_chat-0.1.0.dist-info/WHEEL +5 -0
  249. atlas_chat-0.1.0.dist-info/entry_points.txt +4 -0
  250. atlas_chat-0.1.0.dist-info/top_level.txt +1 -0
atlas/application/chat/agent/react_loop.py
@@ -0,0 +1,338 @@
+ from __future__ import annotations
+
+ import asyncio
+ import json
+ from typing import Any, Dict, List, Optional
+
+ from atlas.domain.messages.models import ToolResult
+ from atlas.interfaces.llm import LLMProtocol, LLMResponse
+ from atlas.interfaces.tools import ToolManagerProtocol
+ from atlas.modules.prompts.prompt_provider import PromptProvider
+
+ from ..utilities import error_handler, file_processor, tool_executor
+ from .protocols import AgentContext, AgentEvent, AgentEventHandler, AgentLoopProtocol, AgentResult
+
+
+ class ReActAgentLoop(AgentLoopProtocol):
+     """Default Reason–Act–Observe agent loop extracted from ChatService._handle_agent_mode.
+
+     Behavior matches existing implementation, including:
+     - Reason/Observe via control tool calls with JSON fallback
+     - Single tool call per Act step
+     - Optional RAG integration
+     - Streaming via emitted AgentEvents (adapter maps to notification_utils)
+     - User input request & stop polling using connection-driven event handler
+     """
+
+     def __init__(
+         self,
+         *,
+         llm: LLMProtocol,
+         tool_manager: Optional[ToolManagerProtocol],
+         prompt_provider: Optional[PromptProvider],
+         connection: Any = None,
+         config_manager=None,
+     ) -> None:
+         self.llm = llm
+         self.tool_manager = tool_manager
+         self.prompt_provider = prompt_provider
+         self.connection = connection
+         self.config_manager = config_manager
+         self.skip_approval = False
+
+     # ---- Internal helpers (mirroring service implementation) ----
+     def _latest_user_question(self, msgs: List[Dict[str, Any]]) -> str:
+         for m in reversed(msgs):
+             if m.get("role") == "user" and m.get("content"):
+                 return str(m.get("content"))
+         return ""
+
+     def _extract_tool_args(self, llm_response: LLMResponse, fname: str) -> Dict[str, Any]:
+         try:
+             if not llm_response or not llm_response.tool_calls:
+                 return {}
+             for tc in llm_response.tool_calls:
+                 f = tc.get("function") if isinstance(tc, dict) else None
+                 if f and f.get("name") == fname:
+                     raw_args = f.get("arguments")
+                     if isinstance(raw_args, str):
+                         try:
+                             return json.loads(raw_args)
+                         except Exception:
+                             return {}
+                     if isinstance(raw_args, dict):
+                         return raw_args
+             return {}
+         except Exception:
+             return {}
+
+     def _parse_control_json(self, text: str) -> Dict[str, Any]:
+         try:
+             return json.loads(text)
+         except Exception:
+             pass
+         if not isinstance(text, str):
+             return {}
+         start = text.rfind("{")
+         end = text.rfind("}")
+         if start != -1 and end != -1 and end > start:
+             try:
+                 return json.loads(text[start : end + 1])
+             except Exception:
+                 return {}
+         return {}
+
+     async def _poll_control_message(self, timeout_sec: float = 0.01) -> Optional[Dict[str, Any]]:
+         if not self.connection:
+             return None
+         try:
+             return await asyncio.wait_for(self.connection.receive_json(), timeout=timeout_sec)
+         except Exception:
+             return None
+
+     async def run(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, Any]],
+         context: AgentContext,
+         selected_tools: Optional[List[str]],
+         data_sources: Optional[List[str]],
+         max_steps: int,
+         temperature: float,
+         event_handler: AgentEventHandler,
+     ) -> AgentResult:
+         # Agent start
+         await event_handler(AgentEvent(type="agent_start", payload={"max_steps": max_steps, "strategy": "react"}))
+
+         steps = 0
+         final_response: Optional[str] = None
+         last_observation: Optional[str] = None
+         user_question = self._latest_user_question(messages)
+         files_manifest_obj = file_processor.build_files_manifest({
+             "session_id": str(context.session_id),
+             "user_email": context.user_email,
+             "files": context.files,
+         })
+         files_manifest_text = files_manifest_obj.get("content") if files_manifest_obj else None
+
+         while steps < max_steps:
+             steps += 1
+             await event_handler(AgentEvent(type="agent_turn_start", payload={"step": steps}))
+
+             # ----- Reason -----
+             reason_prompt = None
+             if self.prompt_provider:
+                 reason_prompt = self.prompt_provider.get_agent_reason_prompt(
+                     user_question=user_question,
+                     files_manifest=files_manifest_text,
+                     last_observation=last_observation,
+                 )
+             reason_messages = list(messages)
+             if reason_prompt:
+                 reason_messages.append({"role": "system", "content": reason_prompt})
+
+             reason_tools_schema: List[Dict[str, Any]] = [
+                 {
+                     "type": "function",
+                     "function": {
+                         "name": "agent_decide_next",
+                         "description": (
+                             "Plan the next action. If you can answer now, set finish=true and provide final_answer. "
+                             "If you need information from the user, set request_input={question: \"...\"}."
+                         ),
+                         "parameters": {
+                             "type": "object",
+                             "properties": {
+                                 "finish": {"type": "boolean"},
+                                 "final_answer": {"type": "string"},
+                                 "request_input": {
+                                     "type": "object",
+                                     "properties": {
+                                         "question": {"type": "string"}
+                                     },
+                                     "required": ["question"],
+                                 },
+                                 "next_plan": {"type": "string"},
+                                 "tools_to_consider": {"type": "array", "items": {"type": "string"}},
+                             },
+                             "additionalProperties": False,
+                         },
+                     },
+                 }
+             ]
+
+             reason_resp: LLMResponse = await self.llm.call_with_tools(
+                 model, reason_messages, reason_tools_schema, "required", temperature=temperature
+             )
+             reason_ctrl = self._extract_tool_args(reason_resp, "agent_decide_next") or self._parse_control_json(reason_resp.content)
+             reason_visible_text: str = reason_resp.content or ""
+             if not reason_ctrl:
+                 reason_text_fallback = await self.llm.call_plain(model, reason_messages, temperature=temperature)
+                 reason_visible_text = reason_text_fallback
+                 reason_ctrl = self._parse_control_json(reason_text_fallback)
+
+             await event_handler(AgentEvent(type="agent_reason", payload={"message": reason_visible_text, "step": steps}))
+
+             finish_flag = bool(reason_ctrl.get("finish")) if isinstance(reason_ctrl, dict) else False
+             req_input = reason_ctrl.get("request_input") if isinstance(reason_ctrl, dict) else None
+             if not req_input and isinstance(reason_visible_text, str) and '"request_input"' in reason_visible_text:
+                 try:
+                     import re as _re
+                     m = _re.search(r'"request_input"\s*:\s*\{[^}]*"question"\s*:\s*"([^"]+)"', reason_visible_text)
+                     if m:
+                         req_input = {"question": m.group(1)}
+                 except Exception:
+                     # Regex parsing failed, continue with JSON fallback
+                     pass
+
+             if req_input and isinstance(req_input, dict) and req_input.get("question"):
+                 await event_handler(AgentEvent(type="agent_request_input", payload={"question": str(req_input.get("question")), "step": steps}))
+                 user_reply: Optional[str] = None
+                 for _ in range(600):
+                     ctrl = await self._poll_control_message(timeout_sec=0.1)
+                     if ctrl and ctrl.get("type") == "agent_user_input" and ctrl.get("content"):
+                         user_reply = str(ctrl.get("content"))
+                         break
+                     if ctrl and ctrl.get("type") == "agent_control" and ctrl.get("action") == "stop":
+                         break
+                 if user_reply:
+                     messages.append({"role": "user", "content": user_reply})
+                     user_question = user_reply
+                     last_observation = "User provided additional input."
+                     continue
+                 break
+
+             if finish_flag:
+                 final_response = reason_ctrl.get("final_answer") or reason_resp.content
+                 break
+
+             # ----- Act -----
+             tools_schema: List[Dict[str, Any]] = []
+             if selected_tools and self.tool_manager:
+                 tools_schema = await error_handler.safe_get_tools_schema(self.tool_manager, selected_tools)
+
+             tool_results: List[ToolResult] = []
+             # Use "required" to force tool calling during Act phase
+             # The LiteLLM caller has fallback logic to "auto" if "required" is not supported
+             if tools_schema:
+                 if data_sources and context.user_email:
+                     llm_response = await self.llm.call_with_rag_and_tools(
+                         model, messages, data_sources, tools_schema, context.user_email, "required", temperature=temperature
+                     )
+                 else:
+                     llm_response = await self.llm.call_with_tools(
+                         model, messages, tools_schema, "required", temperature=temperature
+                     )
+
+                 if llm_response.has_tool_calls():
+                     # Execute only first call
+                     first_call = (llm_response.tool_calls or [None])[0]
+                     if first_call is None:
+                         if llm_response.content:
+                             final_response = llm_response.content
+                         break
+                     messages.append({
+                         "role": "assistant",
+                         "content": llm_response.content,
+                         "tool_calls": [first_call],
+                     })
+                     result = await tool_executor.execute_single_tool(
+                         tool_call=first_call,
+                         session_context={
+                             "session_id": context.session_id,
+                             "user_email": context.user_email,
+                             "files": context.files,
+                         },
+                         tool_manager=self.tool_manager,
+                         update_callback=(self.connection.send_json if self.connection else None),
+                         config_manager=self.config_manager,
+                         skip_approval=self.skip_approval,
+                     )
+                     tool_results.append(result)
+                     messages.append({
+                         "role": "tool",
+                         "content": result.content,
+                         "tool_call_id": result.tool_call_id,
+                     })
+
+                     # Emit an internal event with actual ToolResult(s) for the service to ingest artifacts
+                     await event_handler(AgentEvent(type="agent_tool_results", payload={"results": tool_results}))
+                 else:
+                     if llm_response.content:
+                         final_response = llm_response.content
+                         break
+
+             # ----- Observe -----
+             summaries: List[str] = []
+             # We already emitted tool_complete with results above for ingestion; here just build readable summary text.
+             # If needed, we can reconstruct from last messages.
+             if messages:
+                 # crude extraction of last tool message
+                 for msg in reversed(messages):
+                     if msg.get("role") == "tool":
+                         content_preview = (msg.get("content") or "").strip()
+                         if len(content_preview) > 400:
+                             content_preview = content_preview[:400] + "..."
+                         summaries.append(content_preview)
+                         break
+             tool_summaries_text = "\n".join(summaries) if summaries else "No tools were executed."
+
+             observe_prompt = None
+             if self.prompt_provider:
+                 observe_prompt = self.prompt_provider.get_agent_observe_prompt(
+                     user_question=user_question,
+                     tool_summaries=tool_summaries_text,
+                     step=steps,
+                 )
+             observe_messages = list(messages)
+             if observe_prompt:
+                 observe_messages.append({"role": "system", "content": observe_prompt})
+             observe_tools_schema: List[Dict[str, Any]] = [
+                 {
+                     "type": "function",
+                     "function": {
+                         "name": "agent_observe_decide",
+                         "description": "Given the observations, decide whether to continue another step or finish.",
+                         "parameters": {
+                             "type": "object",
+                             "properties": {
+                                 "should_continue": {"type": "boolean"},
+                                 "final_answer": {"type": "string"},
+                                 "observation": {"type": "string"},
+                             },
+                             "additionalProperties": False,
+                         },
+                     },
+                 }
+             ]
+
+             observe_resp: LLMResponse = await self.llm.call_with_tools(
+                 model, observe_messages, observe_tools_schema, "required", temperature=temperature
+             )
+             observe_ctrl = self._extract_tool_args(observe_resp, "agent_observe_decide") or self._parse_control_json(observe_resp.content)
+             observe_visible_text: str = observe_resp.content or ""
+             if not observe_ctrl:
+                 observe_text_fallback = await self.llm.call_plain(model, observe_messages, temperature=temperature)
+                 observe_visible_text = observe_text_fallback
+                 observe_ctrl = self._parse_control_json(observe_text_fallback)
+
+             await event_handler(AgentEvent(type="agent_observe", payload={"message": observe_visible_text, "step": steps}))
+
+             if isinstance(observe_ctrl, dict):
+                 final_candidate = observe_ctrl.get("final_answer")
+                 should_continue = observe_ctrl.get("should_continue", True)
+                 if final_candidate and isinstance(final_candidate, str) and final_candidate.strip():
+                     final_response = final_candidate
+                     break
+                 if not should_continue:
+                     final_response = observe_visible_text
+                     break
+
+             last_observation = observe_visible_text
+
+         if not final_response:
+             final_response = await self.llm.call_plain(model, messages, temperature=temperature)
+
+         return AgentResult(final_answer=final_response, steps=steps, metadata={"agent_mode": True})
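For orientation, here is a minimal wiring sketch (not part of the package) showing how a caller might drive ReActAgentLoop. The CannedLLM stub and the AgentContext constructor arguments are assumptions inferred from how run() consumes them above; only the keyword arguments of run(), the AgentEvent fields (type, payload), and the JSON control fallback parsed by _parse_control_json are taken from the code itself.

```python
import asyncio

from atlas.application.chat.agent.protocols import AgentContext, AgentEvent
from atlas.application.chat.agent.react_loop import ReActAgentLoop


class CannedLLM:
    """Stub standing in for an LLMProtocol implementation (assumption: the real
    protocol may require more methods than the two used here)."""

    class _Resp:
        def __init__(self, content: str, tool_calls=None):
            self.content = content
            self.tool_calls = tool_calls

        def has_tool_calls(self) -> bool:
            return bool(self.tool_calls)

    async def call_with_tools(self, model, messages, tools, tool_choice, temperature=0.0):
        # Answer on the first Reason step via the JSON fallback that
        # _parse_control_json understands (no tool call emitted).
        return self._Resp('{"finish": true, "final_answer": "Paris"}')

    async def call_plain(self, model, messages, temperature=0.0):
        return "Paris"


async def print_event(event: AgentEvent) -> None:
    # The loop emits agent_start, agent_turn_start, agent_reason, agent_observe,
    # agent_tool_results and agent_request_input events through this handler.
    print(f"[{event.type}] {event.payload}")


async def main() -> None:
    loop = ReActAgentLoop(llm=CannedLLM(), tool_manager=None, prompt_provider=None)
    context = AgentContext(            # assumption: AgentContext exposes these three fields
        session_id="demo-session",
        user_email="user@example.com",
        files=[],
    )
    result = await loop.run(
        model="demo-model",
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        context=context,
        selected_tools=None,           # no tools, so the Act phase is skipped
        data_sources=None,
        max_steps=3,
        temperature=0.2,
        event_handler=print_event,
    )
    print(result.final_answer, result.steps)


if __name__ == "__main__":
    asyncio.run(main())
```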
atlas/application/chat/agent/think_act_loop.py
@@ -0,0 +1,171 @@
+ from __future__ import annotations
+
+ import json
+ from typing import Any, Dict, List, Optional
+
+ from atlas.interfaces.llm import LLMProtocol, LLMResponse
+ from atlas.interfaces.tools import ToolManagerProtocol
+ from atlas.modules.prompts.prompt_provider import PromptProvider
+
+ from ..utilities import error_handler, tool_executor
+ from .protocols import AgentContext, AgentEvent, AgentEventHandler, AgentLoopProtocol, AgentResult
+
+
+ class ThinkActAgentLoop(AgentLoopProtocol):
+     """UserInput -> Think (planning) -> repeat N times: Act -> Think -> Final Think.
+
+     Differences vs ReActAgentLoop:
+     - Single "think" function used for both planning and observation phases.
+     - Executes at most one tool per action step.
+     - Does not reuse the existing MCP think functions; uses internal prompts via LLM tools.
+     """
+
+     def __init__(
+         self,
+         *,
+         llm: LLMProtocol,
+         tool_manager: Optional[ToolManagerProtocol],
+         prompt_provider: Optional[PromptProvider],
+         connection: Any = None,
+         config_manager=None,
+     ) -> None:
+         self.llm = llm
+         self.tool_manager = tool_manager
+         self.prompt_provider = prompt_provider
+         self.connection = connection
+         self.config_manager = config_manager
+         self.skip_approval = False
+
+     async def run(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, Any]],
+         context: AgentContext,
+         selected_tools: Optional[List[str]],
+         data_sources: Optional[List[str]],
+         max_steps: int,
+         temperature: float,
+         event_handler: AgentEventHandler,
+     ) -> AgentResult:
+         await event_handler(AgentEvent(type="agent_start", payload={"max_steps": max_steps, "strategy": "think-act"}))
+
+         steps = 0
+         final_answer: Optional[str] = None
+
+         # Initial think
+         think_tools_schema = [
+             {
+                 "type": "function",
+                 "function": {
+                     "name": "agent_think",
+                     "description": (
+                         "Think step: analyze the user input and context, outline the next action or finish. "
+                         "Be concise: at most two sentences. You are only thinking, not acting right now."
+                     ),
+                     "parameters": {
+                         "type": "object",
+                         "properties": {
+                             "finish": {"type": "boolean"},
+                             "final_answer": {"type": "string"},
+                             "next_action_hint": {"type": "string"},
+                         },
+                         "additionalProperties": False,
+                     },
+                 },
+             }
+         ]
+
+         def parse_args(resp: LLMResponse) -> Dict[str, Any]:
+             try:
+                 # Prefer tool args if present
+                 if getattr(resp, "tool_calls", None):
+                     for tc in resp.tool_calls:
+                         f = tc.get("function") if isinstance(tc, dict) else None
+                         if f and f.get("name") == "agent_think":
+                             args = f.get("arguments")
+                             if isinstance(args, str):
+                                 try:
+                                     return json.loads(args)
+                                 except Exception:
+                                     return {}
+                             if isinstance(args, dict):
+                                 return args
+                 # Fallback to plain JSON content
+                 return json.loads(resp.content or "{}")
+             except Exception:
+                 return {}
+
+         # Emit a synthesized think text to UI
+         async def emit_think(text: str, step: int) -> None:
+             await event_handler(AgentEvent(type="agent_reason", payload={"message": text, "step": step}))
+
+         # First think - ALWAYS happens before entering the loop
+         steps += 1
+         await event_handler(AgentEvent(type="agent_turn_start", payload={"step": steps}))
+         first_think = await self.llm.call_with_tools(model, messages, think_tools_schema, "required", temperature=temperature)
+         think_args = parse_args(first_think)
+         await emit_think(first_think.content or "", steps)
+
+         # Check if we can finish immediately after first think
+         if think_args.get("finish"):
+             final_answer = think_args.get("final_answer") or first_think.content
+
+         # Action loop - entered after first think
+         while steps < max_steps and final_answer is None:
+             # Act: single tool selection and execution
+             tools_schema: List[Dict[str, Any]] = []
+             if selected_tools and self.tool_manager:
+                 tools_schema = await error_handler.safe_get_tools_schema(self.tool_manager, selected_tools)
+
+             # Use "required" to force tool calling during Act phase
+             # The LiteLLM caller has fallback logic to "auto" if "required" is not supported
+             if tools_schema:
+                 if data_sources and context.user_email:
+                     llm_response = await self.llm.call_with_rag_and_tools(
+                         model, messages, data_sources, tools_schema, context.user_email, "required", temperature=temperature
+                     )
+                 else:
+                     llm_response = await self.llm.call_with_tools(
+                         model, messages, tools_schema, "required", temperature=temperature
+                     )
+
+                 if llm_response.has_tool_calls():
+                     first_call = (llm_response.tool_calls or [None])[0]
+                     if first_call is None:
+                         final_answer = llm_response.content or ""
+                         break
+                     messages.append({"role": "assistant", "content": llm_response.content, "tool_calls": [first_call]})
+                     result = await tool_executor.execute_single_tool(
+                         tool_call=first_call,
+                         session_context={
+                             "session_id": context.session_id,
+                             "user_email": context.user_email,
+                             "files": context.files,
+                         },
+                         tool_manager=self.tool_manager,
+                         update_callback=(self.connection.send_json if self.connection else None),
+                         config_manager=self.config_manager,
+                         skip_approval=self.skip_approval,
+                     )
+                     messages.append({"role": "tool", "content": result.content, "tool_call_id": result.tool_call_id})
+                     # Notify service to ingest artifacts
+                     await event_handler(AgentEvent(type="agent_tool_results", payload={"results": [result]}))
+                 else:
+                     if llm_response.content:
+                         final_answer = llm_response.content
+                         break
+
+             # Think after action
+             steps += 1
+             await event_handler(AgentEvent(type="agent_turn_start", payload={"step": steps}))
+             think_resp = await self.llm.call_with_tools(model, messages, think_tools_schema, "required", temperature=temperature)
+             think_args = parse_args(think_resp)
+             await emit_think(think_resp.content or "", steps)
+             if think_args.get("finish"):
+                 final_answer = think_args.get("final_answer") or think_resp.content
+                 break
+
+         if not final_answer:
+             final_answer = await self.llm.call_plain(model, messages, temperature=temperature)
+
+         await event_handler(AgentEvent(type="agent_completion", payload={"steps": steps}))
+         return AgentResult(final_answer=final_answer, steps=steps, metadata={"agent_mode": True, "strategy": "think-act"})
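The think/act loop's control contract is easiest to see in isolation. The sketch below is a standalone illustration, not package code: the SimpleNamespace objects only mimic the .content and .tool_calls attributes that parse_args reads, and parse_think_args mirrors (rather than imports) the extraction order shown above, with agent_think tool-call arguments preferred over bare JSON content.

```python
import json
from types import SimpleNamespace
from typing import Any, Dict


def parse_think_args(resp: Any) -> Dict[str, Any]:
    # Same extraction order as ThinkActAgentLoop.parse_args: tool-call arguments
    # for "agent_think" first, then the raw message content parsed as JSON.
    try:
        for tc in getattr(resp, "tool_calls", None) or []:
            f = tc.get("function") if isinstance(tc, dict) else None
            if f and f.get("name") == "agent_think":
                args = f.get("arguments")
                if isinstance(args, str):
                    return json.loads(args)
                if isinstance(args, dict):
                    return args
        return json.loads(resp.content or "{}")
    except Exception:
        return {}


# Shape 1: the model answered through the agent_think control tool.
tool_resp = SimpleNamespace(
    content="",
    tool_calls=[{"function": {"name": "agent_think",
                              "arguments": '{"finish": true, "final_answer": "Done."}'}}],
)
# Shape 2: no tool call; the model emitted bare JSON in its content.
plain_resp = SimpleNamespace(
    content='{"finish": false, "next_action_hint": "search the docs"}',
    tool_calls=None,
)

print(parse_think_args(tool_resp))   # {'finish': True, 'final_answer': 'Done.'}
print(parse_think_args(plain_resp))  # {'finish': False, 'next_action_hint': 'search the docs'}
```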
atlas/application/chat/approval_manager.py
@@ -0,0 +1,151 @@
+ """
+ Tool approval service for managing approval requests and responses.
+
+ This module handles the approval workflow for tool calls, allowing users to
+ approve, reject, or edit tool arguments before execution.
+ """
+
+ import asyncio
+ import logging
+ from typing import Any, Dict, Optional
+
+ from atlas.core.log_sanitizer import sanitize_for_logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class ToolApprovalRequest:
+     """Represents a pending tool approval request."""
+
+     def __init__(
+         self,
+         tool_call_id: str,
+         tool_name: str,
+         arguments: Dict[str, Any],
+         allow_edit: bool = True
+     ):
+         self.tool_call_id = tool_call_id
+         self.tool_name = tool_name
+         self.arguments = arguments
+         self.allow_edit = allow_edit
+         self.future: asyncio.Future = asyncio.Future()
+
+     async def wait_for_response(self, timeout: float = 300.0) -> Dict[str, Any]:
+         """
+         Wait for user response to this approval request.
+
+         Args:
+             timeout: Maximum time to wait in seconds (default 5 minutes)
+
+         Returns:
+             Dict with 'approved', 'arguments', and optional 'reason'
+
+         Raises:
+             asyncio.TimeoutError: If timeout is reached
+         """
+         try:
+             return await asyncio.wait_for(self.future, timeout=timeout)
+         except asyncio.TimeoutError:
+             logger.warning(f"Approval request timed out for tool {self.tool_name}")
+             raise
+
+     def set_response(self, approved: bool, arguments: Optional[Dict[str, Any]] = None, reason: Optional[str] = None):
+         """Set the user's response to this approval request."""
+         if not self.future.done():
+             self.future.set_result({
+                 "approved": approved,
+                 "arguments": arguments or self.arguments,
+                 "reason": reason
+             })
+
+
+ class ToolApprovalManager:
+     """Manages tool approval requests and responses."""
+
+     def __init__(self):
+         self._pending_requests: Dict[str, ToolApprovalRequest] = {}
+
+     def create_approval_request(
+         self,
+         tool_call_id: str,
+         tool_name: str,
+         arguments: Dict[str, Any],
+         allow_edit: bool = True
+     ) -> ToolApprovalRequest:
+         """
+         Create a new approval request.
+
+         Args:
+             tool_call_id: Unique ID for this tool call
+             tool_name: Name of the tool being called
+             arguments: Tool arguments
+             allow_edit: Whether to allow editing of arguments
+
+         Returns:
+             ToolApprovalRequest object
+         """
+         request = ToolApprovalRequest(tool_call_id, tool_name, arguments, allow_edit)
+         self._pending_requests[tool_call_id] = request
+         logger.info(f"Created approval request for tool {sanitize_for_logging(tool_name)} (call_id: {sanitize_for_logging(tool_call_id)})")
+         return request
+
+     def handle_approval_response(
+         self,
+         tool_call_id: str,
+         approved: bool,
+         arguments: Optional[Dict[str, Any]] = None,
+         reason: Optional[str] = None
+     ) -> bool:
+         """
+         Handle a user's response to an approval request.
+
+         Args:
+             tool_call_id: ID of the tool call being responded to
+             approved: Whether the user approved the call
+             arguments: Potentially edited arguments (if allowed)
+             reason: Optional reason for rejection
+
+         Returns:
+             True if request was found and handled, False otherwise
+         """
+         logger.debug(
+             "handle_approval_response called: tool_call_id=%s, approved=%s",
+             sanitize_for_logging(tool_call_id),
+             sanitize_for_logging(approved),
+         )
+         logger.debug("Pending requests: %s", [sanitize_for_logging(key) for key in self._pending_requests.keys()])
+
+         request = self._pending_requests.get(tool_call_id)
+         if request is None:
+             logger.warning(f"Received approval response for unknown tool call: {sanitize_for_logging(tool_call_id)}")
+             logger.debug("Available pending requests: %s", list(self._pending_requests.keys()))
+             return False
+
+         logger.debug("Found pending request for %s; setting response", sanitize_for_logging(tool_call_id))
+         request.set_response(approved, arguments, reason)
+         # Keep the request in the dict for a bit to avoid race conditions
+         # It will be cleaned up later
+         logger.info(f"Approval response handled for tool {sanitize_for_logging(request.tool_name)}: approved={approved}")
+         return True
+
+     def cleanup_request(self, tool_call_id: str):
+         """Remove a completed approval request."""
+         if tool_call_id in self._pending_requests:
+             del self._pending_requests[tool_call_id]
+             logger.debug(f"Cleaned up approval request: {tool_call_id}")
+
+     def get_pending_requests(self) -> Dict[str, ToolApprovalRequest]:
+         """Get all pending approval requests."""
+         return dict(self._pending_requests)
+
+
+ # Global approval manager instance (one per application)
+ _approval_manager: Optional[ToolApprovalManager] = None
+
+
+ def get_approval_manager() -> ToolApprovalManager:
+     """Get the global tool approval manager instance."""
+     global _approval_manager
+     if _approval_manager is None:
+         _approval_manager = ToolApprovalManager()
+     return _approval_manager
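A typical approval round trip, as a minimal sketch against the API shown above: the tool executor registers the pending call and awaits the decision, while a second task (in practice the WebSocket handler relaying the user's click) resolves it via handle_approval_response. The tool name and arguments here are hypothetical.

```python
import asyncio

from atlas.application.chat.approval_manager import get_approval_manager


async def demo() -> None:
    manager = get_approval_manager()

    # Executor side: register the pending call and wait for the user's decision.
    request = manager.create_approval_request(
        tool_call_id="call_123",
        tool_name="filesystem_read",        # hypothetical tool name
        arguments={"path": "/tmp/report.txt"},
    )

    async def user_decides() -> None:
        # UI/WebSocket side: arrives later, possibly with edited arguments.
        await asyncio.sleep(0.1)
        manager.handle_approval_response(
            tool_call_id="call_123",
            approved=True,
            arguments={"path": "/tmp/report.txt", "encoding": "utf-8"},
        )

    asyncio.create_task(user_decides())
    decision = await request.wait_for_response(timeout=10.0)
    print(decision["approved"], decision["arguments"])

    manager.cleanup_request("call_123")     # remove the entry once execution is done


asyncio.run(demo())
```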