mito_ai-0.1.50-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
mito_ai/tests/file_uploads/test_handlers.py
@@ -0,0 +1,282 @@
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.

import os
import tempfile
import pytest
from unittest.mock import Mock, patch
import tornado.web
from tornado.httputil import HTTPServerRequest
from tornado.web import Application

from mito_ai.file_uploads.handlers import FileUploadHandler


@pytest.fixture
def temp_dir():
    """Create a temporary directory for test files."""
    temp_dir = tempfile.mkdtemp()
    original_cwd = os.getcwd()
    os.chdir(temp_dir)
    yield temp_dir
    os.chdir(original_cwd)
    # Clean up temporary files
    for file in os.listdir(temp_dir):
        os.remove(os.path.join(temp_dir, file))
    os.rmdir(temp_dir)


@pytest.fixture
def handler():
    """Create a FileUploadHandler instance for testing."""
    app = Application()
    request = HTTPServerRequest(method="POST", uri="/upload")

    # Mock the connection to avoid Tornado's assertion
    request.connection = Mock()

    handler = FileUploadHandler(app, request)

    # Mock methods properly to avoid mypy errors
    handler.write = Mock()  # type: ignore
    handler.finish = Mock()  # type: ignore
    handler.set_status = Mock()  # type: ignore
    handler.get_argument = Mock()  # type: ignore

    # Mock authentication for Jupyter server
    handler._jupyter_current_user = "test_user"  # type: ignore

    return handler


def test_validate_file_upload_success(handler):
    """Test successful file upload validation."""
    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
    result = handler._validate_file_upload()
    assert result is True


def test_validate_file_upload_failure(handler):
    """Test file upload validation when no file is present."""
    handler.request.files = {}  # type: ignore
    result = handler._validate_file_upload()
    assert result is False
    handler.set_status.assert_called_with(400)


def test_regular_upload_success(handler, temp_dir):
    """Test successful regular (non-chunked) file upload."""
    filename = "test.csv"
    file_data = b"test,data\n1,2"
    notebook_dir = temp_dir

    handler._handle_regular_upload(filename, file_data, notebook_dir)

    # Verify file was written
    file_path = os.path.join(notebook_dir, filename)
    with open(file_path, "rb") as f:
        content = f.read()
    assert content == file_data

    # Verify response
    handler.write.assert_called_with(
        {"success": True, "filename": filename, "path": file_path}
    )


def test_chunked_upload_first_chunk(handler, temp_dir):
    """Test handling first chunk of a chunked upload."""
    filename = "large_file.csv"
    file_data = b"chunk1_data"
    chunk_number = "1"
    total_chunks = "3"
    notebook_dir = temp_dir

    handler._handle_chunked_upload(
        filename, file_data, chunk_number, total_chunks, notebook_dir
    )

    # Verify chunk was saved (check temp dir structure)
    assert filename in handler._temp_dirs
    temp_dir_path = handler._temp_dirs[filename]["temp_dir"]
    chunk_file = os.path.join(temp_dir_path, "chunk_1")
    assert os.path.exists(chunk_file)

    # Verify response indicates chunk received but not complete
    handler.write.assert_called_with(
        {
            "success": True,
            "chunk_received": True,
            "chunk_number": 1,
            "total_chunks": 3,
        }
    )


def test_chunked_upload_completion(handler, temp_dir):
    """Test completing a chunked upload when all chunks are received."""
    filename = "large_file.csv"
    total_chunks = 2
    notebook_dir = temp_dir

    # Process first chunk
    handler._handle_chunked_upload(
        filename, b"chunk1_data", "1", str(total_chunks), notebook_dir
    )

    # Process final chunk
    handler._handle_chunked_upload(
        filename, b"chunk2_data", "2", str(total_chunks), notebook_dir
    )

    # Verify final file was created
    file_path = os.path.join(notebook_dir, filename)
    assert os.path.exists(file_path)
    with open(file_path, "rb") as f:
        content = f.read()
    assert content == b"chunk1_datachunk2_data"

    # Verify temp dir was cleaned up
    assert filename not in handler._temp_dirs

    # Verify completion response
    handler.write.assert_called_with(
        {
            "success": True,
            "filename": filename,
            "path": file_path,
            "chunk_complete": True,
        }
    )


def test_error_handling(handler):
    """Test error handling in upload process."""
    error_message = "Test error message"
    status_code = 500

    handler._handle_error(error_message, status_code)

    handler.set_status.assert_called_with(status_code)
    handler.write.assert_called_with({"error": error_message})
    handler.finish.assert_called_once()


@patch("mito_ai.file_uploads.handlers.FileUploadHandler._validate_file_upload")
def test_post_method_regular_upload(mock_validate, handler):
    """Test POST method for regular upload."""
    mock_validate.return_value = True
    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
    handler.get_argument.return_value = None  # No chunk parameters

    handler.post()

    mock_validate.assert_called_once()
    handler.finish.assert_called_once()


@patch("mito_ai.file_uploads.handlers.FileUploadHandler._validate_file_upload")
def test_post_method_chunked_upload(mock_validate, handler):
    """Test POST method for chunked upload."""
    mock_validate.return_value = True
    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
    handler.get_argument.side_effect = lambda name, default=None: {
        "chunk_number": "1",
        "total_chunks": "3",
    }.get(name, default)

    handler.post()

    mock_validate.assert_called_once()
    handler.finish.assert_called_once()


def test_are_all_chunks_received_true(handler, temp_dir):
    """Test that all chunks are detected when present."""
    filename = "test.csv"
    total_chunks = 2

    # Manually set up the temp dir structure
    temp_dir_path = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
    handler._temp_dirs[filename] = {
        "temp_dir": temp_dir_path,
        "total_chunks": total_chunks,
        "received_chunks": {1, 2},
    }

    result = handler._are_all_chunks_received(filename, total_chunks)
    assert result is True

    # Clean up
    import shutil

    shutil.rmtree(temp_dir_path)


def test_are_all_chunks_received_false(handler, temp_dir):
    """Test that missing chunks are detected."""
    filename = "test.csv"
    total_chunks = 2

    # Manually set up the temp dir structure with only one chunk
    temp_dir_path = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
    handler._temp_dirs[filename] = {
        "temp_dir": temp_dir_path,
        "total_chunks": total_chunks,
        "received_chunks": {1},  # Only chunk 1 received
    }

    result = handler._are_all_chunks_received(filename, total_chunks)
    assert result is False

    # Clean up
    import shutil

    shutil.rmtree(temp_dir_path)


def test_save_chunk(handler, temp_dir):
    """Test saving individual chunks."""
    filename = "test.csv"
    file_data = b"chunk_data"
    chunk_number = 1
    total_chunks = 3

    # Mock the file operations to avoid filesystem issues
    with patch("builtins.open", create=True) as mock_open:
        mock_file = Mock()
        mock_open.return_value.__enter__.return_value = mock_file

        handler._save_chunk(filename, file_data, chunk_number, total_chunks)

        # Verify temp dir was created in the handler's tracking
        assert filename in handler._temp_dirs
        temp_dir_path = handler._temp_dirs[filename]["temp_dir"]

        # Verify the expected chunk filename was used
        expected_chunk_filename = os.path.join(temp_dir_path, f"chunk_{chunk_number}")
        mock_open.assert_called_with(expected_chunk_filename, "wb")

        # Verify file data was written
        mock_file.write.assert_called_with(file_data)

        # Verify chunk was marked as received
        assert chunk_number in handler._temp_dirs[filename]["received_chunks"]

        # Clean up
        del handler._temp_dirs[filename]


def test_image_size_limit_exceeded(handler, temp_dir):
    """Test that image uploads exceeding 3MB are rejected."""
    filename = "large_image.jpg"
    # Create 5MB of data (5 * 1024 * 1024 bytes)
    file_data = b"x" * (5 * 1024 * 1024)
    notebook_dir = temp_dir

    # The _handle_regular_upload should raise a ValueError for oversized images
    with pytest.raises(ValueError) as exc_info:
        handler._handle_regular_upload(filename, file_data, notebook_dir)

    # Verify the error message mentions the size limit
    assert "exceeded 3MB limit" in str(exc_info.value)
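For orientation, the chunk bookkeeping these tests assert against can be sketched as follows. The names `_temp_dirs`, `temp_dir`, `total_chunks`, `received_chunks`, and the `chunk_<n>` filenames come from the assertions above; the helper functions themselves and their exact logic are assumptions for illustration, not the handler implementation shipped in this wheel.

import os
import tempfile
from typing import Any, Dict

# Hypothetical, standalone sketch of the chunk tracking the tests exercise.
# Only the key names mirror the test assertions; everything else is assumed.
_temp_dirs: Dict[str, Dict[str, Any]] = {}

def save_chunk(filename: str, file_data: bytes, chunk_number: int, total_chunks: int) -> None:
    # The first chunk for a file creates a per-file temp dir and a tracking entry.
    if filename not in _temp_dirs:
        _temp_dirs[filename] = {
            "temp_dir": tempfile.mkdtemp(prefix=f"mito_upload_{filename}_"),
            "total_chunks": total_chunks,
            "received_chunks": set(),
        }
    entry = _temp_dirs[filename]
    # Each chunk is written as chunk_<n> inside that temp dir.
    with open(os.path.join(entry["temp_dir"], f"chunk_{chunk_number}"), "wb") as f:
        f.write(file_data)
    entry["received_chunks"].add(chunk_number)

def are_all_chunks_received(filename: str, total_chunks: int) -> bool:
    # The upload is complete once chunks 1..total_chunks have all been recorded.
    received = _temp_dirs.get(filename, {}).get("received_chunks", set())
    return set(range(1, total_chunks + 1)) <= received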
mito_ai/tests/message_history/test_generate_short_chat_name.py
@@ -0,0 +1,120 @@
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.

import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from traitlets.config import Config
from mito_ai.completions.message_history import generate_short_chat_name
from mito_ai.completions.providers import OpenAIProvider


@pytest.fixture
def provider_config() -> Config:
    """Create a proper Config object for the OpenAIProvider."""
    config = Config()
    config.OpenAIProvider = Config()
    config.OpenAIClient = Config()
    return config


# Test cases for different models and their expected providers/fast models
PROVIDER_TEST_CASES = [
    # (model, client_patch_path)
    ("gpt-4.1", "mito_ai.completions.providers.OpenAIClient"),
    ("claude-3-5-sonnet-20241022", "mito_ai.completions.providers.AnthropicClient"),
    ("gemini-2.0-flash-exp", "mito_ai.completions.providers.GeminiClient")
]

@pytest.mark.parametrize("selected_model,client_patch_path", PROVIDER_TEST_CASES)
@pytest.mark.asyncio
async def test_generate_short_chat_name_uses_correct_provider_and_fast_model(
    selected_model: str,
    client_patch_path: str,
    provider_config: Config,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that generate_short_chat_name uses the correct provider and that the client uses the fast model."""

    # Set up environment variables for all providers
    monkeypatch.setenv("OPENAI_API_KEY", "fake-openai-key")
    monkeypatch.setenv("CLAUDE_API_KEY", "fake-claude-key")
    monkeypatch.setenv("GEMINI_API_KEY", "fake-gemini-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "fake-openai-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "fake-claude-key")
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", "fake-gemini-key")

    # Create mock client for the specific provider being tested
    mock_client = MagicMock()
    mock_client.request_completions = AsyncMock(return_value="Test Chat Name")

    # Patch the specific client class that should be used based on the model
    # We need to patch before creating the OpenAIProvider since OpenAI client is created in constructor
    with patch(client_patch_path, return_value=mock_client):
        # Create the OpenAIProvider after patching so the mock client is used
        llm_provider = OpenAIProvider(config=provider_config)

        # Test the function
        result = await generate_short_chat_name(
            user_message="What is the capital of France?",
            assistant_message="The capital of France is Paris.",
            model=selected_model,
            llm_provider=llm_provider
        )

        # Verify that the correct client's request_completions was called
        mock_client.request_completions.assert_called_once()

        # As a double check, if we have used the correct client, then we must get the correct result
        # from the mocked client as well.
        assert result == "Test Chat Name"


@pytest.mark.asyncio
async def test_generate_short_chat_name_cleans_gemini_response() -> None:
    """Test that generate_short_chat_name properly cleans Gemini-style responses with quotes and newlines."""

    # Create mock llm_provider that returns a response with quotes and newlines
    mock_llm_provider = MagicMock(spec=OpenAIProvider)
    mock_llm_provider.request_completions = AsyncMock(return_value='"France Geography Discussion\n"')

    result = await generate_short_chat_name(
        user_message="What is the capital of France?",
        assistant_message="The capital of France is Paris.",
        model="gemini-2.0-flash-exp",
        llm_provider=mock_llm_provider
    )

    # Verify the response was cleaned properly
    assert result == "France Geography Discussion"
    assert '"' not in result
    assert '\n' not in result


@pytest.mark.asyncio
async def test_generate_short_chat_name_handles_empty_response() -> None:
    """Test that generate_short_chat_name handles empty or None responses gracefully."""

    # Test with empty string response
    mock_llm_provider = MagicMock(spec=OpenAIProvider)
    mock_llm_provider.request_completions = AsyncMock(return_value="")

    result = await generate_short_chat_name(
        user_message="Test message",
        assistant_message="Test response",
        model="gpt-4.1",
        llm_provider=mock_llm_provider
    )

    assert result == "Untitled Chat"

    # Test with None response
    mock_llm_provider.request_completions = AsyncMock(return_value=None)

    result = await generate_short_chat_name(
        user_message="Test message",
        assistant_message="Test response",
        model="gpt-4.1",
        llm_provider=mock_llm_provider
    )

    assert result == "Untitled Chat"