atlas-chat 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- atlas/__init__.py +40 -0
- atlas/application/__init__.py +7 -0
- atlas/application/chat/__init__.py +7 -0
- atlas/application/chat/agent/__init__.py +10 -0
- atlas/application/chat/agent/act_loop.py +179 -0
- atlas/application/chat/agent/factory.py +142 -0
- atlas/application/chat/agent/protocols.py +46 -0
- atlas/application/chat/agent/react_loop.py +338 -0
- atlas/application/chat/agent/think_act_loop.py +171 -0
- atlas/application/chat/approval_manager.py +151 -0
- atlas/application/chat/elicitation_manager.py +191 -0
- atlas/application/chat/events/__init__.py +1 -0
- atlas/application/chat/events/agent_event_relay.py +112 -0
- atlas/application/chat/modes/__init__.py +1 -0
- atlas/application/chat/modes/agent.py +125 -0
- atlas/application/chat/modes/plain.py +74 -0
- atlas/application/chat/modes/rag.py +81 -0
- atlas/application/chat/modes/tools.py +179 -0
- atlas/application/chat/orchestrator.py +213 -0
- atlas/application/chat/policies/__init__.py +1 -0
- atlas/application/chat/policies/tool_authorization.py +99 -0
- atlas/application/chat/preprocessors/__init__.py +1 -0
- atlas/application/chat/preprocessors/message_builder.py +92 -0
- atlas/application/chat/preprocessors/prompt_override_service.py +104 -0
- atlas/application/chat/service.py +454 -0
- atlas/application/chat/utilities/__init__.py +6 -0
- atlas/application/chat/utilities/error_handler.py +367 -0
- atlas/application/chat/utilities/event_notifier.py +546 -0
- atlas/application/chat/utilities/file_processor.py +613 -0
- atlas/application/chat/utilities/tool_executor.py +789 -0
- atlas/atlas_chat_cli.py +347 -0
- atlas/atlas_client.py +238 -0
- atlas/core/__init__.py +0 -0
- atlas/core/auth.py +205 -0
- atlas/core/authorization_manager.py +27 -0
- atlas/core/capabilities.py +123 -0
- atlas/core/compliance.py +215 -0
- atlas/core/domain_whitelist.py +147 -0
- atlas/core/domain_whitelist_middleware.py +82 -0
- atlas/core/http_client.py +28 -0
- atlas/core/log_sanitizer.py +102 -0
- atlas/core/metrics_logger.py +59 -0
- atlas/core/middleware.py +131 -0
- atlas/core/otel_config.py +242 -0
- atlas/core/prompt_risk.py +200 -0
- atlas/core/rate_limit.py +0 -0
- atlas/core/rate_limit_middleware.py +64 -0
- atlas/core/security_headers_middleware.py +51 -0
- atlas/domain/__init__.py +37 -0
- atlas/domain/chat/__init__.py +1 -0
- atlas/domain/chat/dtos.py +85 -0
- atlas/domain/errors.py +96 -0
- atlas/domain/messages/__init__.py +12 -0
- atlas/domain/messages/models.py +160 -0
- atlas/domain/rag_mcp_service.py +664 -0
- atlas/domain/sessions/__init__.py +7 -0
- atlas/domain/sessions/models.py +36 -0
- atlas/domain/unified_rag_service.py +371 -0
- atlas/infrastructure/__init__.py +10 -0
- atlas/infrastructure/app_factory.py +135 -0
- atlas/infrastructure/events/__init__.py +1 -0
- atlas/infrastructure/events/cli_event_publisher.py +140 -0
- atlas/infrastructure/events/websocket_publisher.py +140 -0
- atlas/infrastructure/sessions/in_memory_repository.py +56 -0
- atlas/infrastructure/transport/__init__.py +7 -0
- atlas/infrastructure/transport/websocket_connection_adapter.py +33 -0
- atlas/init_cli.py +226 -0
- atlas/interfaces/__init__.py +15 -0
- atlas/interfaces/events.py +134 -0
- atlas/interfaces/llm.py +54 -0
- atlas/interfaces/rag.py +40 -0
- atlas/interfaces/sessions.py +75 -0
- atlas/interfaces/tools.py +57 -0
- atlas/interfaces/transport.py +24 -0
- atlas/main.py +564 -0
- atlas/mcp/api_key_demo/README.md +76 -0
- atlas/mcp/api_key_demo/main.py +172 -0
- atlas/mcp/api_key_demo/run.sh +56 -0
- atlas/mcp/basictable/main.py +147 -0
- atlas/mcp/calculator/main.py +149 -0
- atlas/mcp/code-executor/execution_engine.py +98 -0
- atlas/mcp/code-executor/execution_environment.py +95 -0
- atlas/mcp/code-executor/main.py +528 -0
- atlas/mcp/code-executor/result_processing.py +276 -0
- atlas/mcp/code-executor/script_generation.py +195 -0
- atlas/mcp/code-executor/security_checker.py +140 -0
- atlas/mcp/corporate_cars/main.py +437 -0
- atlas/mcp/csv_reporter/main.py +545 -0
- atlas/mcp/duckduckgo/main.py +182 -0
- atlas/mcp/elicitation_demo/README.md +171 -0
- atlas/mcp/elicitation_demo/main.py +262 -0
- atlas/mcp/env-demo/README.md +158 -0
- atlas/mcp/env-demo/main.py +199 -0
- atlas/mcp/file_size_test/main.py +284 -0
- atlas/mcp/filesystem/main.py +348 -0
- atlas/mcp/image_demo/main.py +113 -0
- atlas/mcp/image_demo/requirements.txt +4 -0
- atlas/mcp/logging_demo/README.md +72 -0
- atlas/mcp/logging_demo/main.py +103 -0
- atlas/mcp/many_tools_demo/main.py +50 -0
- atlas/mcp/order_database/__init__.py +0 -0
- atlas/mcp/order_database/main.py +369 -0
- atlas/mcp/order_database/signal_data.csv +1001 -0
- atlas/mcp/pdfbasic/main.py +394 -0
- atlas/mcp/pptx_generator/main.py +760 -0
- atlas/mcp/pptx_generator/requirements.txt +13 -0
- atlas/mcp/pptx_generator/run_test.sh +1 -0
- atlas/mcp/pptx_generator/test_pptx_generator_security.py +169 -0
- atlas/mcp/progress_demo/main.py +167 -0
- atlas/mcp/progress_updates_demo/QUICKSTART.md +273 -0
- atlas/mcp/progress_updates_demo/README.md +120 -0
- atlas/mcp/progress_updates_demo/main.py +497 -0
- atlas/mcp/prompts/main.py +222 -0
- atlas/mcp/public_demo/main.py +189 -0
- atlas/mcp/sampling_demo/README.md +169 -0
- atlas/mcp/sampling_demo/main.py +234 -0
- atlas/mcp/thinking/main.py +77 -0
- atlas/mcp/tool_planner/main.py +240 -0
- atlas/mcp/ui-demo/badmesh.png +0 -0
- atlas/mcp/ui-demo/main.py +383 -0
- atlas/mcp/ui-demo/templates/button_demo.html +32 -0
- atlas/mcp/ui-demo/templates/data_visualization.html +32 -0
- atlas/mcp/ui-demo/templates/form_demo.html +28 -0
- atlas/mcp/username-override-demo/README.md +320 -0
- atlas/mcp/username-override-demo/main.py +308 -0
- atlas/modules/__init__.py +0 -0
- atlas/modules/config/__init__.py +34 -0
- atlas/modules/config/cli.py +231 -0
- atlas/modules/config/config_manager.py +1096 -0
- atlas/modules/file_storage/__init__.py +22 -0
- atlas/modules/file_storage/cli.py +330 -0
- atlas/modules/file_storage/content_extractor.py +290 -0
- atlas/modules/file_storage/manager.py +295 -0
- atlas/modules/file_storage/mock_s3_client.py +402 -0
- atlas/modules/file_storage/s3_client.py +417 -0
- atlas/modules/llm/__init__.py +19 -0
- atlas/modules/llm/caller.py +287 -0
- atlas/modules/llm/litellm_caller.py +675 -0
- atlas/modules/llm/models.py +19 -0
- atlas/modules/mcp_tools/__init__.py +17 -0
- atlas/modules/mcp_tools/client.py +2123 -0
- atlas/modules/mcp_tools/token_storage.py +556 -0
- atlas/modules/prompts/prompt_provider.py +130 -0
- atlas/modules/rag/__init__.py +24 -0
- atlas/modules/rag/atlas_rag_client.py +336 -0
- atlas/modules/rag/client.py +129 -0
- atlas/routes/admin_routes.py +865 -0
- atlas/routes/config_routes.py +484 -0
- atlas/routes/feedback_routes.py +361 -0
- atlas/routes/files_routes.py +274 -0
- atlas/routes/health_routes.py +40 -0
- atlas/routes/mcp_auth_routes.py +223 -0
- atlas/server_cli.py +164 -0
- atlas/tests/conftest.py +20 -0
- atlas/tests/integration/test_mcp_auth_integration.py +152 -0
- atlas/tests/manual_test_sampling.py +87 -0
- atlas/tests/modules/mcp_tools/test_client_auth.py +226 -0
- atlas/tests/modules/mcp_tools/test_client_env.py +191 -0
- atlas/tests/test_admin_mcp_server_management_routes.py +141 -0
- atlas/tests/test_agent_roa.py +135 -0
- atlas/tests/test_app_factory_smoke.py +47 -0
- atlas/tests/test_approval_manager.py +439 -0
- atlas/tests/test_atlas_client.py +188 -0
- atlas/tests/test_atlas_rag_client.py +447 -0
- atlas/tests/test_atlas_rag_integration.py +224 -0
- atlas/tests/test_attach_file_flow.py +287 -0
- atlas/tests/test_auth_utils.py +165 -0
- atlas/tests/test_backend_public_url.py +185 -0
- atlas/tests/test_banner_logging.py +287 -0
- atlas/tests/test_capability_tokens_and_injection.py +203 -0
- atlas/tests/test_compliance_level.py +54 -0
- atlas/tests/test_compliance_manager.py +253 -0
- atlas/tests/test_config_manager.py +617 -0
- atlas/tests/test_config_manager_paths.py +12 -0
- atlas/tests/test_core_auth.py +18 -0
- atlas/tests/test_core_utils.py +190 -0
- atlas/tests/test_docker_env_sync.py +202 -0
- atlas/tests/test_domain_errors.py +329 -0
- atlas/tests/test_domain_whitelist.py +359 -0
- atlas/tests/test_elicitation_manager.py +408 -0
- atlas/tests/test_elicitation_routing.py +296 -0
- atlas/tests/test_env_demo_server.py +88 -0
- atlas/tests/test_error_classification.py +113 -0
- atlas/tests/test_error_flow_integration.py +116 -0
- atlas/tests/test_feedback_routes.py +333 -0
- atlas/tests/test_file_content_extraction.py +1134 -0
- atlas/tests/test_file_extraction_routes.py +158 -0
- atlas/tests/test_file_library.py +107 -0
- atlas/tests/test_file_manager_unit.py +18 -0
- atlas/tests/test_health_route.py +49 -0
- atlas/tests/test_http_client_stub.py +8 -0
- atlas/tests/test_imports_smoke.py +30 -0
- atlas/tests/test_interfaces_llm_response.py +9 -0
- atlas/tests/test_issue_access_denied_fix.py +136 -0
- atlas/tests/test_llm_env_expansion.py +836 -0
- atlas/tests/test_log_level_sensitive_data.py +285 -0
- atlas/tests/test_mcp_auth_routes.py +341 -0
- atlas/tests/test_mcp_client_auth.py +331 -0
- atlas/tests/test_mcp_data_injection.py +270 -0
- atlas/tests/test_mcp_get_authorized_servers.py +95 -0
- atlas/tests/test_mcp_hot_reload.py +512 -0
- atlas/tests/test_mcp_image_content.py +424 -0
- atlas/tests/test_mcp_logging.py +172 -0
- atlas/tests/test_mcp_progress_updates.py +313 -0
- atlas/tests/test_mcp_prompt_override_system_prompt.py +102 -0
- atlas/tests/test_mcp_prompts_server.py +39 -0
- atlas/tests/test_mcp_tool_result_parsing.py +296 -0
- atlas/tests/test_metrics_logger.py +56 -0
- atlas/tests/test_middleware_auth.py +379 -0
- atlas/tests/test_prompt_risk_and_acl.py +141 -0
- atlas/tests/test_rag_mcp_aggregator.py +204 -0
- atlas/tests/test_rag_mcp_service.py +224 -0
- atlas/tests/test_rate_limit_middleware.py +45 -0
- atlas/tests/test_routes_config_smoke.py +60 -0
- atlas/tests/test_routes_files_download_token.py +41 -0
- atlas/tests/test_routes_files_health.py +18 -0
- atlas/tests/test_runtime_imports.py +53 -0
- atlas/tests/test_sampling_integration.py +482 -0
- atlas/tests/test_security_admin_routes.py +61 -0
- atlas/tests/test_security_capability_tokens.py +65 -0
- atlas/tests/test_security_file_stats_scope.py +21 -0
- atlas/tests/test_security_header_injection.py +191 -0
- atlas/tests/test_security_headers_and_filename.py +63 -0
- atlas/tests/test_shared_session_repository.py +101 -0
- atlas/tests/test_system_prompt_loading.py +181 -0
- atlas/tests/test_token_storage.py +505 -0
- atlas/tests/test_tool_approval_config.py +93 -0
- atlas/tests/test_tool_approval_utils.py +356 -0
- atlas/tests/test_tool_authorization_group_filtering.py +223 -0
- atlas/tests/test_tool_details_in_config.py +108 -0
- atlas/tests/test_tool_planner.py +300 -0
- atlas/tests/test_unified_rag_service.py +398 -0
- atlas/tests/test_username_override_in_approval.py +258 -0
- atlas/tests/test_websocket_auth_header.py +168 -0
- atlas/version.py +6 -0
- atlas_chat-0.1.0.data/data/.env.example +253 -0
- atlas_chat-0.1.0.data/data/config/defaults/compliance-levels.json +44 -0
- atlas_chat-0.1.0.data/data/config/defaults/domain-whitelist.json +123 -0
- atlas_chat-0.1.0.data/data/config/defaults/file-extractors.json +74 -0
- atlas_chat-0.1.0.data/data/config/defaults/help-config.json +198 -0
- atlas_chat-0.1.0.data/data/config/defaults/llmconfig-buggy.yml +11 -0
- atlas_chat-0.1.0.data/data/config/defaults/llmconfig.yml +19 -0
- atlas_chat-0.1.0.data/data/config/defaults/mcp.json +138 -0
- atlas_chat-0.1.0.data/data/config/defaults/rag-sources.json +17 -0
- atlas_chat-0.1.0.data/data/config/defaults/splash-config.json +16 -0
- atlas_chat-0.1.0.dist-info/METADATA +236 -0
- atlas_chat-0.1.0.dist-info/RECORD +250 -0
- atlas_chat-0.1.0.dist-info/WHEEL +5 -0
- atlas_chat-0.1.0.dist-info/entry_points.txt +4 -0
- atlas_chat-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
"""Tests for enhanced MCP progress update notifications."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from unittest.mock import AsyncMock
|
|
5
|
+
|
|
6
|
+
import pytest
|
|
7
|
+
|
|
8
|
+
from atlas.application.chat.utilities.event_notifier import _handle_structured_progress_update, notify_tool_progress
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_regular():
    """A plain progress message should produce exactly one tool_progress event."""
    cb = AsyncMock()

    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=5,
        total=10,
        message="Processing...",
        update_callback=cb,
    )

    cb.assert_called_once()
    event = cb.call_args[0][0]

    # Every field of the emitted event must match what was passed in,
    # with the percentage derived as progress/total.
    expected = {
        "type": "tool_progress",
        "tool_call_id": "test-123",
        "tool_name": "test_tool",
        "progress": 5,
        "total": 10,
        "percentage": 50.0,
        "message": "Processing...",
    }
    for key, value in expected.items():
        assert event[key] == value
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_canvas_update():
    """An MCP_UPDATE canvas payload emits canvas_content followed by tool_progress."""
    cb = AsyncMock()

    payload = {
        "type": "canvas_update",
        "content": "<html><body>Test</body></html>",
        "progress_message": "Updating canvas",
    }

    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=1,
        total=5,
        message="MCP_UPDATE:" + json.dumps(payload),
        update_callback=cb,
    )

    # One canvas_content event plus one tool_progress event.
    assert cb.call_count == 2

    first, second = (call[0][0] for call in cb.call_args_list)

    # The canvas content is forwarded verbatim.
    assert first["type"] == "canvas_content"
    assert first["content"] == "<html><body>Test</body></html>"

    # The progress event carries the human-readable progress_message.
    assert second["type"] == "tool_progress"
    assert second["message"] == "Updating canvas"
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_system_message():
    """An MCP_UPDATE system_message payload is relayed as an intermediate_update."""
    cb = AsyncMock()

    payload = {
        "type": "system_message",
        "message": "Stage 1 completed",
        "subtype": "success",
        "progress_message": "Completed stage 1",
    }

    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=1,
        total=3,
        message="MCP_UPDATE:" + json.dumps(payload),
        update_callback=cb,
    )

    # One intermediate_update event plus one tool_progress event.
    assert cb.call_count == 2

    event = cb.call_args_list[0][0][0]
    assert event["type"] == "intermediate_update"
    assert event["update_type"] == "system_message"

    # The payload fields and the tool identity travel in the data envelope.
    data = event["data"]
    assert data["message"] == "Stage 1 completed"
    assert data["subtype"] == "success"
    assert data["tool_call_id"] == "test-123"
    assert data["tool_name"] == "test_tool"
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_artifacts():
    """An MCP_UPDATE artifacts payload is surfaced as a progress_artifacts update."""
    cb = AsyncMock()

    payload = {
        "type": "artifacts",
        "artifacts": [
            {
                "name": "result.html",
                "b64": "PGh0bWw+PC9odG1sPg==",
                "mime": "text/html",
                "size": 100,
                "viewer": "html",
            }
        ],
        "display": {
            "open_canvas": True,
            "primary_file": "result.html",
        },
        "progress_message": "Generated result",
    }

    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=2,
        total=3,
        message="MCP_UPDATE:" + json.dumps(payload),
        update_callback=cb,
    )

    # One intermediate_update event plus one tool_progress event.
    assert cb.call_count == 2

    event = cb.call_args_list[0][0][0]
    assert event["type"] == "intermediate_update"
    assert event["update_type"] == "progress_artifacts"

    # The single artifact and the display hints survive the round trip.
    artifacts = event["data"]["artifacts"]
    assert len(artifacts) == 1
    assert artifacts[0]["name"] == "result.html"
    assert event["data"]["display"]["open_canvas"] is True
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_artifacts_inline_shape():
    """Progress artifacts should preserve inline-friendly fields for frontend rendering."""
    cb = AsyncMock()

    payload = {
        "type": "artifacts",
        "artifacts": [
            {
                "name": "progress_step_1.html",
                "b64": "PGgxPkhlbGxvPC9oMT4=",
                "mime": "text/html",
                "size": 42,
                "description": "Step 1",
                "viewer": "html",
            }
        ],
        "display": {
            "open_canvas": True,
            "primary_file": "progress_step_1.html",
            "mode": "replace",
        },
        "progress_message": "demo: Step 1/3",
    }

    await notify_tool_progress(
        tool_call_id="call-1",
        tool_name="progress_tool",
        progress=1,
        total=3,
        message="MCP_UPDATE:" + json.dumps(payload),
        update_callback=cb,
    )

    # The first callback carries the raw artifact fields through untouched.
    event = cb.call_args_list[0][0][0]
    assert event["type"] == "intermediate_update"
    assert event["update_type"] == "progress_artifacts"

    data = event["data"]
    assert data["tool_call_id"] == "call-1"
    assert data["tool_name"] == "progress_tool"

    assert isinstance(data["artifacts"], list)
    artifact = data["artifacts"][0]
    # These fields are required for inline rendering on the frontend.
    for field, expected in (
        ("name", "progress_step_1.html"),
        ("b64", "PGgxPkhlbGxvPC9oMT4="),
        ("mime", "text/html"),
        ("viewer", "html"),
        ("size", 42),
        ("description", "Step 1"),
    ):
        assert artifact[field] == expected

    display = data["display"]
    assert display["open_canvas"] is True
    assert display["primary_file"] == "progress_step_1.html"
    assert display["mode"] == "replace"
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_invalid_json():
    """Malformed MCP_UPDATE JSON must degrade to a plain progress notification."""
    cb = AsyncMock()

    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=1,
        total=5,
        message="MCP_UPDATE:{invalid json}",
        update_callback=cb,
    )

    # Fallback path: exactly one regular progress event, with the raw
    # (unparseable) text still visible in the message.
    cb.assert_called_once()
    event = cb.call_args[0][0]
    assert event["type"] == "tool_progress"
    assert "invalid json" in event["message"]
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
@pytest.mark.asyncio
async def test_notify_tool_progress_no_callback():
    """A missing update callback must be tolerated without raising."""
    # The call completing without an exception is the whole assertion here.
    await notify_tool_progress(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=1,
        total=5,
        message="Test",
        update_callback=None,
    )
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
@pytest.mark.asyncio
async def test_handle_structured_progress_update_canvas():
    """_handle_structured_progress_update emits canvas_content then tool_progress."""
    cb = AsyncMock()

    await _handle_structured_progress_update(
        tool_call_id="test-123",
        tool_name="test_tool",
        progress=1,
        total=5,
        structured_data={
            "type": "canvas_update",
            "content": "<html>Test</html>",
            "progress_message": "Updating",
        },
        update_callback=cb,
    )

    # Two events, in a fixed order: the canvas payload first, progress second.
    assert cb.call_count == 2
    kinds = [call[0][0]["type"] for call in cb.call_args_list]
    assert kinds == ["canvas_content", "tool_progress"]
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
@pytest.mark.asyncio
async def test_percentage_calculation():
    """Percentage is derived from progress/total; zero or None total yields None."""
    cb = AsyncMock()

    async def send(progress, total):
        # Fire one notification and return the percentage of the last event.
        await notify_tool_progress(
            tool_call_id="test-123",
            tool_name="test_tool",
            progress=progress,
            total=total,
            message="Test",
            update_callback=cb,
        )
        return cb.call_args[0][0]["percentage"]

    # A valid total produces a real percentage.
    assert await send(3, 4) == 75.0

    # A zero total cannot be converted to a percentage.
    cb.reset_mock()
    assert await send(1, 0) is None

    # A None total means indeterminate progress.
    cb.reset_mock()
    assert await send(1, None) is None
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from atlas.modules.config import ConfigManager
|
|
7
|
+
from atlas.modules.mcp_tools.client import MCPToolManager
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@pytest.mark.asyncio
async def test_selected_mcp_prompt_overrides_system_prompt(monkeypatch):
    """
    Verify that when a prompt is selected (e.g., prompts_expert_dog_trainer),
    the backend injects it as a system message at the start of the LLM messages.
    We patch the LLM caller to capture the messages argument.
    """
    # Resolve the example prompts MCP config relative to the backend root
    # (tests run with cwd=backend/), so this test exercises the same JSON
    # configuration as the other prompts tests.
    backend_root = Path(__file__).parent.parent
    project_root = backend_root.parent
    config_path = project_root / "config" / "mcp-example-configs" / "mcp-prompts.json"
    assert config_path.exists(), f"Missing example prompts config: {config_path}"

    data = json.loads(config_path.read_text())
    assert "prompts" in data, "prompts server not defined in example config"

    # Set up the MCP manager directly (avoids importing app_factory/litellm).
    mcp: MCPToolManager = MCPToolManager(config_path=str(config_path))
    await mcp.initialize_clients()
    await mcp.discover_prompts()
    assert "prompts" in mcp.available_prompts, "prompts server not discovered"

    captured = {}

    class _NoToolResponse:
        """Minimal LLM response object reporting no tool calls."""

        def __init__(self):
            self.content = "ok"
            self.tool_calls = []

        def has_tool_calls(self):
            return False

    class DummyLLM:
        """Stub LLM caller that records the messages it is handed."""

        async def call_plain(self, model_name, messages, temperature=0.7):
            captured["messages"] = messages
            return "ok"

        async def call_with_tools(self, model_name, messages, tools_schema, tool_choice="auto", temperature=0.7):
            captured["messages"] = messages
            return _NoToolResponse()

        async def call_with_rag(self, model_name, messages, data_sources, user_email, temperature=0.7):
            captured["messages"] = messages
            return "ok"

        async def call_with_rag_and_tools(self, model_name, messages, data_sources, tools_schema, user_email, tool_choice="auto", temperature=0.7):
            captured["messages"] = messages
            return _NoToolResponse()

    # Create a chat service wired with the dummy LLM.
    from atlas.application.chat.service import ChatService

    chat_service = ChatService(
        llm=DummyLLM(),
        tool_manager=mcp,
        connection=None,
        config_manager=ConfigManager(),
        file_manager=None,
    )

    # Create a session id.
    import uuid

    session_id = uuid.uuid4()

    # Send a message with the prompt selected.
    await chat_service.handle_chat_message(
        session_id=session_id,
        content="Hello there",
        model="test-model",
        selected_tools=None,
        selected_prompts=["prompts_expert_dog_trainer"],
        selected_data_sources=None,
        user_email="tester@example.com",
        only_rag=False,
        tool_choice_required=False,
        agent_mode=False,
        temperature=0.7,
    )

    # Validate we injected a system message first.
    msgs = captured.get("messages")
    assert msgs, "LLM was not called or messages not captured"
    assert msgs[0]["role"] == "system", f"Expected first message to be system, got: {msgs[0]}"
    # The expert_dog_trainer prompt includes key phrase "expert dog trainer".
    first_content = msgs[0]["content"].lower()
    assert "dog trainer" in first_content or "canine" in first_content, "Injected system prompt content not found"
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from atlas.modules.mcp_tools.client import MCPToolManager
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@pytest.mark.asyncio
async def test_mcp_prompts_discovery_includes_expert_dog_trainer():
    """Prompt discovery against the example config must expose expert_dog_trainer.

    Uses the example prompts MCP config file so this test exercises the real
    JSON configuration used for prompts. Tests run with cwd=backend/, so the
    config path is resolved from the backend root.
    """
    # Fix: removed leftover debug print() calls that polluted test output.
    backend_root = Path(__file__).parent.parent
    project_root = backend_root.parent
    config_path = project_root / "config" / "mcp-example-configs" / "mcp-prompts.json"
    assert config_path.exists(), f"Missing example prompts config: {config_path}"

    # Sanity-check that the JSON contains a "prompts" server.
    data = json.loads(config_path.read_text())
    assert "prompts" in data, "prompts server not defined in example config"

    mcp = MCPToolManager(config_path=str(config_path))

    # Ensure fresh clients and prompt discovery.
    await mcp.initialize_clients()
    await mcp.discover_prompts()

    # The prompts server should be configured (from config/overrides mcp.json).
    assert "prompts" in mcp.available_prompts, "prompts server not discovered"

    server_data = mcp.available_prompts["prompts"]
    prompts = server_data.get("prompts", [])
    # Discovered prompts are objects; compare by their .name attribute.
    names = {getattr(p, "name", None) for p in prompts}

    assert "expert_dog_trainer" in names, f"expert_dog_trainer not in discovered prompts: {names}"
|