mito-ai 0.1.40__py3-none-any.whl → 0.1.42__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of mito-ai might be problematic.

Files changed (64)
  1. mito_ai/__init__.py +19 -6
  2. mito_ai/_version.py +1 -1
  3. mito_ai/app_builder/handlers.py +1 -2
  4. mito_ai/app_manager/__init__.py +4 -0
  5. mito_ai/app_manager/handlers.py +129 -0
  6. mito_ai/app_manager/models.py +58 -0
  7. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  8. mito_ai/completions/completion_handlers/chat_completion_handler.py +2 -2
  9. mito_ai/completions/completion_handlers/utils.py +77 -37
  10. mito_ai/completions/handlers.py +1 -1
  11. mito_ai/completions/message_history.py +9 -1
  12. mito_ai/completions/models.py +3 -1
  13. mito_ai/completions/prompt_builders/agent_execution_prompt.py +2 -0
  14. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  15. mito_ai/completions/prompt_builders/agent_system_message.py +17 -0
  16. mito_ai/completions/prompt_builders/utils.py +7 -0
  17. mito_ai/constants.py +3 -2
  18. mito_ai/file_uploads/__init__.py +3 -0
  19. mito_ai/file_uploads/handlers.py +225 -0
  20. mito_ai/file_uploads/urls.py +21 -0
  21. mito_ai/openai_client.py +1 -1
  22. mito_ai/tests/completions/completion_handlers_utils_test.py +51 -0
  23. mito_ai/tests/file_uploads/__init__.py +2 -0
  24. mito_ai/tests/file_uploads/test_handlers.py +267 -0
  25. mito_ai/tests/message_history/test_message_history_utils.py +57 -4
  26. mito_ai/utils/mito_server_utils.py +7 -0
  27. mito_ai/utils/server_limits.py +1 -1
  28. mito_ai/utils/telemetry_utils.py +26 -9
  29. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  30. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  31. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  32. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js +2662 -1144
  33. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js.map +1 -0
  34. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  35. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  36. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js +50 -26
  37. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js.map +1 -0
  38. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  39. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  40. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  41. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  42. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  43. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  44. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  45. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  46. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  47. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  48. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  49. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  50. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/METADATA +1 -1
  51. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/RECORD +61 -40
  52. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js.map +0 -1
  53. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js.map +0 -1
  54. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  55. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  56. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  57. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  58. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  59. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  60. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  61. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  62. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/WHEEL +0 -0
  63. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/entry_points.txt +0 -0
  64. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/licenses/LICENSE +0 -0
mito_ai/file_uploads/handlers.py ADDED
@@ -0,0 +1,225 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import os
+import tempfile
+import tornado
+from typing import Dict, Any
+from jupyter_server.base.handlers import APIHandler
+from mito_ai.utils.telemetry_utils import log_file_upload_attempt, log_file_upload_failure
+
+
+class FileUploadHandler(APIHandler):
+    # Class-level dictionary to store temporary directories for each file upload
+    # This persists across handler instances since Tornado recreates handlers per request
+    # Key: filename, Value: dict with temp_dir, total_chunks, received_chunks, logged_upload
+    _temp_dirs: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+    @tornado.web.authenticated
+    def post(self) -> None:
+        """Handle file upload with multipart form data."""
+        try:
+            # Validate request has file
+            if not self._validate_file_upload():
+                return
+
+            uploaded_file = self.request.files["file"][0]
+            filename = uploaded_file["filename"]
+            file_data = uploaded_file["body"]
+
+            # Get notebook directory from request
+            notebook_dir = self.get_argument("notebook_dir", ".")
+
+            # Check if this is a chunked upload
+            chunk_number = self.get_argument("chunk_number", None)
+            total_chunks = self.get_argument("total_chunks", None)
+
+            if chunk_number and total_chunks:
+                self._handle_chunked_upload(
+                    filename, file_data, chunk_number, total_chunks, notebook_dir
+                )
+            else:
+                # Log the file upload attempt for regular (non-chunked) uploads
+                file_extension = filename.split(".")[-1].lower()
+                log_file_upload_attempt(filename, file_extension, False, 0)
+                self._handle_regular_upload(filename, file_data, notebook_dir)
+
+            self.finish()
+
+        except Exception as e:
+            self._handle_error(f"Failed to save file: {str(e)}")
+
+    def _validate_file_upload(self) -> bool:
+        """Validate that a file was uploaded in the request."""
+        if "file" not in self.request.files:
+            self._handle_error("No file uploaded", status_code=400)
+            return False
+        return True
+
+    def _handle_chunked_upload(
+        self,
+        filename: str,
+        file_data: bytes,
+        chunk_number: str,
+        total_chunks: str,
+        notebook_dir: str,
+    ) -> None:
+        """Handle chunked file upload."""
+        chunk_num = int(chunk_number)
+        total_chunks_num = int(total_chunks)
+
+        # Log the file upload attempt only for the first chunk
+        if chunk_num == 1:
+            file_extension = filename.split(".")[-1].lower()
+            log_file_upload_attempt(filename, file_extension, True, total_chunks_num)
+
+        # Save chunk to temporary file
+        self._save_chunk(filename, file_data, chunk_num, total_chunks_num)
+
+        # Check if all chunks are received and reconstruct if complete
+        if self._are_all_chunks_received(filename, total_chunks_num):
+            self._reconstruct_file(filename, total_chunks_num, notebook_dir)
+            self._send_chunk_complete_response(filename, notebook_dir)
+        else:
+            self._send_chunk_received_response(chunk_num, total_chunks_num)
+
+    def _handle_regular_upload(
+        self, filename: str, file_data: bytes, notebook_dir: str
+    ) -> None:
+        """Handle regular (non-chunked) file upload."""
+        file_path = os.path.join(notebook_dir, filename)
+        with open(file_path, "wb") as f:
+            f.write(file_data)
+
+        self.write({"success": True, "filename": filename, "path": file_path})
+
+    def _save_chunk(
+        self, filename: str, file_data: bytes, chunk_number: int, total_chunks: int
+    ) -> None:
+        """Save a chunk to a temporary file."""
+        print(f"DEBUG: Saving chunk {chunk_number}/{total_chunks} for file {filename}")
+
+        # Initialize temporary directory for this file if it doesn't exist
+        if filename not in self._temp_dirs:
+            temp_dir = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
+            self._temp_dirs[filename] = {
+                "temp_dir": temp_dir,
+                "total_chunks": total_chunks,
+                "received_chunks": set(),
+            }
+            print(f"DEBUG: Created temp dir {temp_dir} for file {filename}")
+
+        # Save the chunk to the temporary directory
+        chunk_filename = os.path.join(
+            self._temp_dirs[filename]["temp_dir"], f"chunk_{chunk_number}"
+        )
+        with open(chunk_filename, "wb") as f:
+            f.write(file_data)
+
+        # Mark this chunk as received
+        self._temp_dirs[filename]["received_chunks"].add(chunk_number)
+        print(
+            f"DEBUG: Saved chunk {chunk_number}, total received: {len(self._temp_dirs[filename]['received_chunks'])}/{total_chunks}"
+        )
+
+    def _are_all_chunks_received(self, filename: str, total_chunks: int) -> bool:
+        """Check if all chunks for a file have been received."""
+        if filename not in self._temp_dirs:
+            print(f"DEBUG: No temp dir found for {filename}")
+            return False
+
+        received_chunks = self._temp_dirs[filename]["received_chunks"]
+        is_complete = len(received_chunks) == total_chunks
+        print(
+            f"DEBUG: Checking completion for {filename}: {len(received_chunks)}/{total_chunks} chunks received, complete: {is_complete}"
+        )
+        return is_complete
+
+    def _reconstruct_file(
+        self, filename: str, total_chunks: int, notebook_dir: str
+    ) -> None:
+        """Reconstruct the final file from all chunks and clean up temporary directory."""
+        print(f"DEBUG: Starting reconstruction for {filename}")
+
+        if filename not in self._temp_dirs:
+            raise ValueError(f"No temporary directory found for file: {filename}")
+
+        temp_dir = self._temp_dirs[filename]["temp_dir"]
+        file_path = os.path.join(notebook_dir, filename)
+
+        print(f"DEBUG: Reconstructing from {temp_dir} to {file_path}")
+
+        try:
+            # Reconstruct the file from chunks
+            with open(file_path, "wb") as final_file:
+                for i in range(1, total_chunks + 1):
+                    chunk_filename = os.path.join(temp_dir, f"chunk_{i}")
+                    print(f"DEBUG: Reading chunk {i} from {chunk_filename}")
+                    with open(chunk_filename, "rb") as chunk_file:
+                        chunk_data = chunk_file.read()
+                        final_file.write(chunk_data)
+                        print(f"DEBUG: Wrote {len(chunk_data)} bytes from chunk {i}")
+
+            print(f"DEBUG: Successfully reconstructed {filename}")
+        finally:
+            # Clean up the temporary directory
+            print(f"DEBUG: Cleaning up temp dir for {filename}")
+            self._cleanup_temp_dir(filename)
+
+    def _cleanup_temp_dir(self, filename: str) -> None:
+        """Clean up the temporary directory for a file."""
+        if filename in self._temp_dirs:
+            temp_dir = self._temp_dirs[filename]["temp_dir"]
+            try:
+                import shutil
+
+                shutil.rmtree(temp_dir)
+            except Exception as e:
+                # Log the error but don't fail the upload
+                print(
+                    f"Warning: Failed to clean up temporary directory {temp_dir}: {e}"
+                )
+            finally:
+                # Remove from tracking dictionary
+                del self._temp_dirs[filename]
+
+    def _send_chunk_complete_response(self, filename: str, notebook_dir: str) -> None:
+        """Send response indicating all chunks have been processed and file is complete."""
+        file_path = os.path.join(notebook_dir, filename)
+        self.write(
+            {
+                "success": True,
+                "filename": filename,
+                "path": file_path,
+                "chunk_complete": True,
+            }
+        )
+
+    def _send_chunk_received_response(
+        self, chunk_number: int, total_chunks: int
+    ) -> None:
+        """Send response indicating a chunk was received but file is not yet complete."""
+        self.write(
+            {
+                "success": True,
+                "chunk_received": True,
+                "chunk_number": chunk_number,
+                "total_chunks": total_chunks,
+            }
+        )
+
+    def _handle_error(self, error_message: str, status_code: int = 500) -> None:
+        """Handle errors and send appropriate error response."""
+        log_file_upload_failure(error_message)
+        self.set_status(status_code)
+        self.write({"error": error_message})
+        self.finish()
+
+    def on_finish(self) -> None:
+        """Clean up any remaining temporary directories when the handler is finished."""
+        super().on_finish()
+        # Note: We don't clean up here anymore since we want to preserve state across requests
+        # The cleanup happens when the file is fully reconstructed
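For context, a client drives this handler by POSTing multipart form data to the /mito-ai/upload route registered in urls.py below. The form field names ("file", "notebook_dir", "chunk_number", "total_chunks") come directly from the handler; the server URL, token handling, and 5 MB chunk size in the following sketch are illustrative assumptions, not part of the package.

# Hypothetical client-side sketch of the chunked upload protocol implemented above.
import math
import requests

SERVER = "http://localhost:8888"      # assumed local Jupyter server
TOKEN = "<jupyter-token>"             # assumed auth token
CHUNK_SIZE = 5 * 1024 * 1024          # assumed 5 MB chunks

def upload_file(path: str, notebook_dir: str = ".") -> None:
    with open(path, "rb") as f:
        data = f.read()

    total_chunks = max(1, math.ceil(len(data) / CHUNK_SIZE))
    for i in range(total_chunks):
        chunk = data[i * CHUNK_SIZE:(i + 1) * CHUNK_SIZE]
        resp = requests.post(
            f"{SERVER}/mito-ai/upload",
            params={"token": TOKEN},
            data={
                "notebook_dir": notebook_dir,
                # Chunk numbers are 1-based, matching _reconstruct_file's range(1, total_chunks + 1)
                "chunk_number": str(i + 1),
                "total_chunks": str(total_chunks),
            },
            files={"file": (path.split("/")[-1], chunk)},
        )
        resp.raise_for_status()
        # Intermediate chunks return chunk_received responses; the last returns chunk_complete.
        print(resp.json())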
mito_ai/file_uploads/urls.py ADDED
@@ -0,0 +1,21 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Tuple, Any
+from jupyter_server.utils import url_path_join
+from mito_ai.file_uploads.handlers import FileUploadHandler
+
+
+def get_file_uploads_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
+    """Get all file uploads related URL patterns.
+
+    Args:
+        base_url: The base URL for the Jupyter server
+
+    Returns:
+        List of (url_pattern, handler_class, handler_kwargs) tuples
+    """
+    BASE_URL = base_url + "/mito-ai"
+    return [
+        (url_path_join(BASE_URL, "upload"), FileUploadHandler, {}),
+    ]
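These tuples are in the shape Tornado's Application.add_handlers expects. The actual wiring lives in mito_ai/__init__.py (changed in this release but not shown in full here), so the following registration sketch is illustrative of the typical Jupyter server extension pattern rather than the package's exact code.

# Illustrative sketch: registering the upload route from a Jupyter server extension.
from jupyter_server.serverapp import ServerApp
from mito_ai.file_uploads.urls import get_file_uploads_urls

def _load_jupyter_server_extension(server_app: ServerApp) -> None:
    web_app = server_app.web_app
    base_url = web_app.settings["base_url"]
    # host_pattern ".*$" matches all hosts; each tuple is (route, handler, kwargs)
    web_app.add_handlers(".*$", get_file_uploads_urls(base_url))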
mito_ai/openai_client.py CHANGED
@@ -35,7 +35,7 @@ from mito_ai.utils.telemetry_utils import (
     USER_KEY,
 )
 
-OPENAI_MODEL_FALLBACK = "gpt-5"
+OPENAI_MODEL_FALLBACK = "gpt-4.1"
 
 class OpenAIClient(LoggingConfigurable):
     """Provide AI feature through OpenAI services."""
mito_ai/tests/completions/completion_handlers_utils_test.py ADDED
@@ -0,0 +1,51 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from mito_ai.completions.completion_handlers.utils import create_ai_optimized_message
+
+
+def test_text_only_message():
+    """Test scenario where the user only inputs text"""
+    result = create_ai_optimized_message("Hello world")
+
+    assert result["role"] == "user"
+    assert result["content"] == "Hello world"
+
+
+def test_message_with_uploaded_image():
+    """Test scenario where the user uploads an image"""
+    result = create_ai_optimized_message(
+        text="Analyze this", base64EncodedUploadedImage="image_data"
+    )
+
+    assert result["role"] == "user"
+    assert isinstance(result["content"], list)
+    assert result["content"][0]["type"] == "text"
+    assert result["content"][1]["type"] == "image_url"
+
+
+def test_message_with_active_cell_output():
+    """Test scenario where the active cell has an output"""
+    result = create_ai_optimized_message(
+        text="Analyze this", base64EncodedActiveCellOutput="cell_output_data"
+    )
+
+    assert result["role"] == "user"
+    assert isinstance(result["content"], list)
+    assert result["content"][0]["type"] == "text"
+    assert result["content"][1]["type"] == "image_url"
+
+
+def test_message_with_uploaded_image_and_active_cell_output():
+    """Test scenario where the user uploads an image and the active cell has an output"""
+    result = create_ai_optimized_message(
+        text="Analyze this",
+        base64EncodedUploadedImage="image_data",
+        base64EncodedActiveCellOutput="cell_output_data",
+    )
+
+    assert result["role"] == "user"
+    assert isinstance(result["content"], list)
+    assert result["content"][0]["type"] == "text"
+    assert result["content"][1]["type"] == "image_url"
+    assert result["content"][2]["type"] == "image_url"
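The shape these tests assert follows OpenAI's multimodal chat message format: a plain string for text-only input, and a list of typed content parts when images are attached. As a rough illustration (the exact data-URL prefix is an assumption; the tests only check the "type" fields), a text-plus-image message looks like:

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Analyze this"},
        {
            "type": "image_url",
            "image_url": {"url": "data:image/png;base64,<base64-encoded image>"},
        },
    ],
}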
mito_ai/tests/file_uploads/__init__.py ADDED
@@ -0,0 +1,2 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
mito_ai/tests/file_uploads/test_handlers.py ADDED
@@ -0,0 +1,267 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import os
+import tempfile
+import pytest
+from unittest.mock import Mock, patch
+import tornado.web
+from tornado.httputil import HTTPServerRequest
+from tornado.web import Application
+
+from mito_ai.file_uploads.handlers import FileUploadHandler
+
+
+@pytest.fixture
+def temp_dir():
+    """Create a temporary directory for test files."""
+    temp_dir = tempfile.mkdtemp()
+    original_cwd = os.getcwd()
+    os.chdir(temp_dir)
+    yield temp_dir
+    os.chdir(original_cwd)
+    # Clean up temporary files
+    for file in os.listdir(temp_dir):
+        os.remove(os.path.join(temp_dir, file))
+    os.rmdir(temp_dir)
+
+
+@pytest.fixture
+def handler():
+    """Create a FileUploadHandler instance for testing."""
+    app = Application()
+    request = HTTPServerRequest(method="POST", uri="/upload")
+
+    # Mock the connection to avoid Tornado's assertion
+    request.connection = Mock()
+
+    handler = FileUploadHandler(app, request)
+
+    # Mock methods properly to avoid mypy errors
+    handler.write = Mock()  # type: ignore
+    handler.finish = Mock()  # type: ignore
+    handler.set_status = Mock()  # type: ignore
+    handler.get_argument = Mock()  # type: ignore
+
+    # Mock authentication for Jupyter server
+    handler._jupyter_current_user = "test_user"  # type: ignore
+
+    return handler
+
+
+def test_validate_file_upload_success(handler):
+    """Test successful file upload validation."""
+    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
+    result = handler._validate_file_upload()
+    assert result is True
+
+
+def test_validate_file_upload_failure(handler):
+    """Test file upload validation when no file is present."""
+    handler.request.files = {}  # type: ignore
+    result = handler._validate_file_upload()
+    assert result is False
+    handler.set_status.assert_called_with(400)
+
+
+def test_regular_upload_success(handler, temp_dir):
+    """Test successful regular (non-chunked) file upload."""
+    filename = "test.csv"
+    file_data = b"test,data\n1,2"
+    notebook_dir = temp_dir
+
+    handler._handle_regular_upload(filename, file_data, notebook_dir)
+
+    # Verify file was written
+    file_path = os.path.join(notebook_dir, filename)
+    with open(file_path, "rb") as f:
+        content = f.read()
+        assert content == file_data
+
+    # Verify response
+    handler.write.assert_called_with(
+        {"success": True, "filename": filename, "path": file_path}
+    )
+
+
+def test_chunked_upload_first_chunk(handler, temp_dir):
+    """Test handling first chunk of a chunked upload."""
+    filename = "large_file.csv"
+    file_data = b"chunk1_data"
+    chunk_number = "1"
+    total_chunks = "3"
+    notebook_dir = temp_dir
+
+    handler._handle_chunked_upload(
+        filename, file_data, chunk_number, total_chunks, notebook_dir
+    )
+
+    # Verify chunk was saved (check temp dir structure)
+    assert filename in handler._temp_dirs
+    temp_dir_path = handler._temp_dirs[filename]["temp_dir"]
+    chunk_file = os.path.join(temp_dir_path, "chunk_1")
+    assert os.path.exists(chunk_file)
+
+    # Verify response indicates chunk received but not complete
+    handler.write.assert_called_with(
+        {
+            "success": True,
+            "chunk_received": True,
+            "chunk_number": 1,
+            "total_chunks": 3,
+        }
+    )
+
+
+def test_chunked_upload_completion(handler, temp_dir):
+    """Test completing a chunked upload when all chunks are received."""
+    filename = "large_file.csv"
+    total_chunks = 2
+    notebook_dir = temp_dir
+
+    # Process first chunk
+    handler._handle_chunked_upload(
+        filename, b"chunk1_data", "1", str(total_chunks), notebook_dir
+    )
+
+    # Process final chunk
+    handler._handle_chunked_upload(
+        filename, b"chunk2_data", "2", str(total_chunks), notebook_dir
+    )
+
+    # Verify final file was created
+    file_path = os.path.join(notebook_dir, filename)
+    assert os.path.exists(file_path)
+    with open(file_path, "rb") as f:
+        content = f.read()
+        assert content == b"chunk1_datachunk2_data"
+
+    # Verify temp dir was cleaned up
+    assert filename not in handler._temp_dirs
+
+    # Verify completion response
+    handler.write.assert_called_with(
+        {
+            "success": True,
+            "filename": filename,
+            "path": file_path,
+            "chunk_complete": True,
+        }
+    )
+
+
+def test_error_handling(handler):
+    """Test error handling in upload process."""
+    error_message = "Test error message"
+    status_code = 500
+
+    handler._handle_error(error_message, status_code)
+
+    handler.set_status.assert_called_with(status_code)
+    handler.write.assert_called_with({"error": error_message})
+    handler.finish.assert_called_once()
+
+
+@patch("mito_ai.file_uploads.handlers.FileUploadHandler._validate_file_upload")
+def test_post_method_regular_upload(mock_validate, handler):
+    """Test POST method for regular upload."""
+    mock_validate.return_value = True
+    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
+    handler.get_argument.return_value = None  # No chunk parameters
+
+    handler.post()
+
+    mock_validate.assert_called_once()
+    handler.finish.assert_called_once()
+
+
+@patch("mito_ai.file_uploads.handlers.FileUploadHandler._validate_file_upload")
+def test_post_method_chunked_upload(mock_validate, handler):
+    """Test POST method for chunked upload."""
+    mock_validate.return_value = True
+    handler.request.files = {"file": [Mock(filename="test.csv", body=b"data")]}  # type: ignore
+    handler.get_argument.side_effect = lambda name, default=None: {
+        "chunk_number": "1",
+        "total_chunks": "3",
+    }.get(name, default)
+
+    handler.post()
+
+    mock_validate.assert_called_once()
+    handler.finish.assert_called_once()
+
+
+def test_are_all_chunks_received_true(handler, temp_dir):
+    """Test that all chunks are detected when present."""
+    filename = "test.csv"
+    total_chunks = 2
+
+    # Manually set up the temp dir structure
+    temp_dir_path = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
+    handler._temp_dirs[filename] = {
+        "temp_dir": temp_dir_path,
+        "total_chunks": total_chunks,
+        "received_chunks": {1, 2},
+    }
+
+    result = handler._are_all_chunks_received(filename, total_chunks)
+    assert result is True
+
+    # Clean up
+    import shutil
+
+    shutil.rmtree(temp_dir_path)
+
+
+def test_are_all_chunks_received_false(handler, temp_dir):
+    """Test that missing chunks are detected."""
+    filename = "test.csv"
+    total_chunks = 2
+
+    # Manually set up the temp dir structure with only one chunk
+    temp_dir_path = tempfile.mkdtemp(prefix=f"mito_upload_{filename}_")
+    handler._temp_dirs[filename] = {
+        "temp_dir": temp_dir_path,
+        "total_chunks": total_chunks,
+        "received_chunks": {1},  # Only chunk 1 received
+    }
+
+    result = handler._are_all_chunks_received(filename, total_chunks)
+    assert result is False
+
+    # Clean up
+    import shutil
+
+    shutil.rmtree(temp_dir_path)
+
+
+def test_save_chunk(handler, temp_dir):
+    """Test saving individual chunks."""
+    filename = "test.csv"
+    file_data = b"chunk_data"
+    chunk_number = 1
+    total_chunks = 3
+
+    # Mock the file operations to avoid filesystem issues
+    with patch("builtins.open", create=True) as mock_open:
+        mock_file = Mock()
+        mock_open.return_value.__enter__.return_value = mock_file
+
+        handler._save_chunk(filename, file_data, chunk_number, total_chunks)
+
+        # Verify temp dir was created in the handler's tracking
+        assert filename in handler._temp_dirs
+        temp_dir_path = handler._temp_dirs[filename]["temp_dir"]
+
+        # Verify the expected chunk filename was used
+        expected_chunk_filename = os.path.join(temp_dir_path, f"chunk_{chunk_number}")
+        mock_open.assert_called_with(expected_chunk_filename, "wb")
+
+        # Verify file data was written
+        mock_file.write.assert_called_with(file_data)
+
+        # Verify chunk was marked as received
+        assert chunk_number in handler._temp_dirs[filename]["received_chunks"]
+
+    # Clean up
+    del handler._temp_dirs[filename]
mito_ai/tests/message_history/test_message_history_utils.py CHANGED
@@ -7,9 +7,9 @@ from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.utils.message_history_utils import trim_sections_from_message_content, trim_old_messages
 from mito_ai.completions.prompt_builders.chat_prompt import create_chat_prompt
 from mito_ai.completions.prompt_builders.agent_execution_prompt import create_agent_execution_prompt
-from mito_ai.completions.prompt_builders.agent_smart_debug_prompt import (
-    create_agent_smart_debug_prompt,
-)
+from mito_ai.completions.prompt_builders.agent_smart_debug_prompt import create_agent_smart_debug_prompt
+from unittest.mock import Mock, patch
+from mito_ai.completions.message_history import GlobalMessageHistory, ChatThread
 from mito_ai.completions.prompt_builders.smart_debug_prompt import create_error_prompt
 from mito_ai.completions.prompt_builders.explain_code_prompt import create_explain_code_prompt
 from mito_ai.completions.models import (
@@ -27,6 +27,9 @@ from mito_ai.completions.prompt_builders.prompt_constants import (
     CONTENT_REMOVED_PLACEHOLDER,
 )
 
+
+
+
 # Standard test data for multiple tests
 TEST_VARIABLES = ["'df': pd.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]})"]
 TEST_FILES = ["data.csv", "script.py"]
@@ -386,4 +389,54 @@ def test_trim_mixed_content_messages() -> None:
 
     # Verify that the recent messages are untouched
     assert trimmed_messages[1] == message_list[1]
-    assert trimmed_messages[2] == message_list[2]
+    assert trimmed_messages[2] == message_list[2]
+
+
+def test_get_display_history_calls_update_last_interaction() -> None:
+    """Test that get_display_history calls _update_last_interaction when retrieving a thread."""
+
+    # Create a mock thread
+    thread_id = ThreadID("test-thread-id")
+    mock_thread = Mock(spec=ChatThread)
+    mock_thread.display_history = [{"role": "user", "content": "test message"}]
+    mock_thread.last_interaction_ts = 1234567890.0
+
+    # Create message history instance and add the mock thread
+    message_history = GlobalMessageHistory()
+    message_history._chat_threads = {thread_id: mock_thread}
+
+    # Mock the _update_last_interaction method
+    with patch.object(message_history, '_update_last_interaction') as mock_update:
+        with patch.object(message_history, '_save_thread_to_disk') as mock_save:
+            # Call get_display_history
+            result = message_history.get_display_history(thread_id)
+
+            # Verify _update_last_interaction was called with the thread
+            mock_update.assert_called_once_with(mock_thread)
+
+            # Verify _save_thread_to_disk was also called
+            mock_save.assert_called_once_with(mock_thread)
+
+    # Verify the result is correct
+    assert result == [{"role": "user", "content": "test message"}]
+
+
+def test_get_display_history_returns_empty_for_nonexistent_thread() -> None:
+    """Test that get_display_history returns empty list for non-existent thread."""
+    from mito_ai.completions.message_history import GlobalMessageHistory
+    from mito_ai.completions.models import ThreadID
+
+    message_history = GlobalMessageHistory()
+    thread_id = ThreadID("nonexistent-thread-id")
+
+    # Mock the methods to ensure they're not called
+    with patch.object(message_history, '_update_last_interaction') as mock_update:
+        with patch.object(message_history, '_save_thread_to_disk') as mock_save:
+            result = message_history.get_display_history(thread_id)
+
+            # Verify methods were not called since thread doesn't exist
+            mock_update.assert_not_called()
+            mock_save.assert_not_called()
+
+    # Verify empty result
+    assert result == []
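These tests pin down the intended behavior of GlobalMessageHistory.get_display_history. The real implementation is in mito_ai/completions/message_history.py (changed in this release but not shown above), so the following method sketch is inferred from the assertions rather than copied from the source:

# Sketch inferred from the tests above, not the package's actual code.
def get_display_history(self, thread_id):
    thread = self._chat_threads.get(thread_id)
    if thread is None:
        return []                            # nonexistent thread -> empty list
    self._update_last_interaction(thread)    # bump last_interaction_ts
    self._save_thread_to_disk(thread)        # persist the updated timestamp
    return thread.display_history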
mito_ai/utils/mito_server_utils.py CHANGED
@@ -11,6 +11,7 @@ from tornado.httpclient import HTTPResponse
 from mito_ai.constants import MITO_GEMINI_URL
 from mito_ai.utils.utils import _create_http_client
 
+MITO_ERROR_MARKER = "MITO_ERROR_MARKER:"
 
 class ProviderCompletionException(Exception):
     """Custom exception for Mito server errors that converts well to CompletionError."""
@@ -179,6 +180,12 @@ async def stream_response_from_mito_server(
             if chunk_processor:
                 processed_chunk = chunk_processor(chunk)
 
+            # Check if this chunk contains an error marker
+            if processed_chunk.startswith(MITO_ERROR_MARKER):
+                error_message = processed_chunk[len(MITO_ERROR_MARKER):]
+                print(f"Detected error in {provider_name} stream: {error_message}")
+                raise ProviderCompletionException(error_message, provider_name=provider_name)
+
             if reply_fn is not None and message_id is not None:
                 # Send the chunk directly to the frontend
                 reply_fn(CompletionStreamChunk(
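This change adds an in-band error convention: the Mito server can report a failure inside an otherwise-successful streamed response by prefixing a chunk with MITO_ERROR_MARKER, and the client raises instead of forwarding that chunk to the frontend. A minimal illustration of the convention (the sample payload and the RuntimeError stand-in are hypothetical):

MITO_ERROR_MARKER = "MITO_ERROR_MARKER:"

def raise_if_error_chunk(processed_chunk: str) -> str:
    if processed_chunk.startswith(MITO_ERROR_MARKER):
        error_message = processed_chunk[len(MITO_ERROR_MARKER):]
        # mito_server_utils raises ProviderCompletionException(error_message, provider_name=...)
        raise RuntimeError(error_message)
    return processed_chunk

raise_if_error_chunk("normal text chunk")                        # passes through unchanged
raise_if_error_chunk("MITO_ERROR_MARKER:provider rejected key")  # raises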
mito_ai/utils/server_limits.py CHANGED
@@ -28,7 +28,7 @@ free tier, but running AI models is expensive, so we need to limit the usage
 or we will no longer be able to provide this free tier.
 """
 # Monthly chat completions limit for free tier users
-OS_MONTHLY_AI_COMPLETIONS_LIMIT: Final[int] = 50
+OS_MONTHLY_AI_COMPLETIONS_LIMIT: Final[int] = 150
 
 # Monthly autocomplete limit for free tier users
 OS_MONTHLY_AUTOCOMPLETE_LIMIT: Final[int] = 5000