mito-ai 0.1.44__py3-none-any.whl → 0.1.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mito-ai might be problematic; see the advisory details below.

Files changed (52)
  1. mito_ai/_version.py +1 -1
  2. mito_ai/anthropic_client.py +2 -3
  3. mito_ai/app_deploy/app_deploy_utils.py +25 -0
  4. mito_ai/app_deploy/handlers.py +9 -12
  5. mito_ai/app_deploy/models.py +4 -1
  6. mito_ai/completions/handlers.py +27 -1
  7. mito_ai/completions/models.py +1 -0
  8. mito_ai/completions/prompt_builders/prompt_constants.py +22 -4
  9. mito_ai/streamlit_conversion/streamlit_agent_handler.py +5 -2
  10. mito_ai/streamlit_conversion/streamlit_utils.py +5 -7
  11. mito_ai/streamlit_conversion/validate_streamlit_app.py +34 -25
  12. mito_ai/streamlit_preview/handlers.py +3 -0
  13. mito_ai/tests/deploy_app/test_app_deploy_utils.py +71 -0
  14. mito_ai/tests/providers/test_anthropic_client.py +2 -2
  15. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +0 -84
  16. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +0 -15
  17. mito_ai/tests/utils/test_anthropic_utils.py +4 -4
  18. mito_ai/utils/anthropic_utils.py +11 -19
  19. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +100 -100
  20. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  21. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  22. mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.cf2e3ad2797fbb53826b.js → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js +543 -105
  23. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js.map +1 -0
  24. mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.5482493d1270f55b7283.js → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js +16 -16
  25. mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.5482493d1270f55b7283.js.map → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js.map +1 -1
  26. {mito_ai-0.1.44.dist-info → mito_ai-0.1.45.dist-info}/METADATA +2 -2
  27. {mito_ai-0.1.44.dist-info → mito_ai-0.1.45.dist-info}/RECORD +51 -49
  28. mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.cf2e3ad2797fbb53826b.js.map +0 -1
  29. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  30. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  31. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  32. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  33. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  34. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  35. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  36. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  37. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  38. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  39. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  40. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  41. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  42. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  43. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  44. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  45. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  46. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  47. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  48. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  49. {mito_ai-0.1.44.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  50. {mito_ai-0.1.44.dist-info → mito_ai-0.1.45.dist-info}/WHEEL +0 -0
  51. {mito_ai-0.1.44.dist-info → mito_ai-0.1.45.dist-info}/entry_points.txt +0 -0
  52. {mito_ai-0.1.44.dist-info → mito_ai-0.1.45.dist-info}/licenses/LICENSE +0 -0
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # This file is auto-generated by Hatchling. As such, do not:
2
2
  # - modify
3
3
  # - track in version control e.g. be sure to add to .gitignore
4
- __version__ = VERSION = '0.1.44'
4
+ __version__ = VERSION = '0.1.45'
@@ -52,12 +52,12 @@ def extract_and_parse_anthropic_json_response(response: Message) -> Union[object
52
52
 
53
53
 
54
54
  def get_anthropic_system_prompt_and_messages(messages: List[ChatCompletionMessageParam]) -> Tuple[
55
- Union[str, anthropic.NotGiven], List[MessageParam]]:
55
+ Union[str, anthropic.Omit], List[MessageParam]]:
56
56
  """
57
57
  Convert a list of OpenAI messages to a list of Anthropic messages.
58
58
  """
59
59
 
60
- system_prompt: Union[str, anthropic.NotGiven] = anthropic.NotGiven()
60
+ system_prompt: Union[str, anthropic.Omit] = anthropic.Omit()
61
61
  anthropic_messages: List[MessageParam] = []
62
62
 
63
63
  for message in messages:
@@ -206,7 +206,6 @@ class AnthropicClient:
206
206
  stream=True
207
207
  )
208
208
 
209
-
210
209
  for chunk in stream:
211
210
  if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
212
211
  content = chunk.delta.text
@@ -0,0 +1,25 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ import os
5
+ import zipfile
6
+ import logging
7
+ from typing import List, Optional
8
+
9
+ def add_files_to_zip(zip_path: str, base_path: str, files_to_add: List[str], logger: Optional[logging.Logger] = None) -> None:
10
+ """Create a zip file at zip_path and add the selected files/folders."""
11
+ with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
12
+ for rel_path in files_to_add:
13
+ abs_path = os.path.join(base_path, rel_path)
14
+
15
+ if os.path.isfile(abs_path):
16
+ zipf.write(abs_path, arcname=rel_path)
17
+ elif os.path.isdir(abs_path):
18
+ for root, _, files in os.walk(abs_path):
19
+ for file in files:
20
+ file_abs = os.path.join(root, file)
21
+ arcname = os.path.relpath(file_abs, base_path)
22
+ zipf.write(file_abs, arcname=arcname)
23
+ else:
24
+ if logger:
25
+ logger.warning(f"Skipping missing file: {abs_path}")
@@ -4,13 +4,13 @@
4
4
  import os
5
5
  import time
6
6
  import logging
7
- from typing import Any, Union, Optional
8
- import zipfile
7
+ from typing import Any, Union, List
9
8
  import tempfile
10
9
  from mito_ai.streamlit_conversion.streamlit_utils import get_app_path
11
10
  from mito_ai.utils.create import initialize_user
12
11
  from mito_ai.utils.version_utils import is_pro
13
12
  from mito_ai.utils.websocket_base import BaseWebSocketHandler
13
+ from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip
14
14
  from mito_ai.app_deploy.models import (
15
15
  DeployAppReply,
16
16
  AppDeployError,
@@ -18,7 +18,6 @@ from mito_ai.app_deploy.models import (
18
18
  ErrorMessage,
19
19
  MessageType
20
20
  )
21
- from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
22
21
  from mito_ai.logger import get_logger
23
22
  from mito_ai.constants import ACTIVE_STREAMLIT_BASE_URL
24
23
  import requests
@@ -111,6 +110,7 @@ class AppDeployHandler(BaseWebSocketHandler):
111
110
  message_id = message.message_id
112
111
  notebook_path = message.notebook_path
113
112
  jwt_token = message.jwt_token
113
+ files_to_upload = message.selected_files
114
114
 
115
115
  if not message_id:
116
116
  self.log.error("Missing message_id in request")
@@ -168,7 +168,7 @@ class AppDeployHandler(BaseWebSocketHandler):
168
168
  ))
169
169
 
170
170
  # Finally, deploy the app
171
- deploy_url = await self._deploy_app(app_directory, jwt_token)
171
+ deploy_url = await self._deploy_app(app_directory, files_to_upload, jwt_token)
172
172
 
173
173
  # Send the response
174
174
  self.reply(DeployAppReply(
@@ -219,11 +219,12 @@ class AppDeployHandler(BaseWebSocketHandler):
219
219
  return False
220
220
 
221
221
 
222
- async def _deploy_app(self, app_path: str, jwt_token: str = '') -> str:
222
+ async def _deploy_app(self, app_path: str, files_to_upload:List[str], jwt_token: str = '') -> str:
223
223
  """Deploy the app using pre-signed URLs.
224
224
 
225
225
  Args:
226
226
  app_path: Path to the app file.
227
+ files_to_upload: Files the user selected to upload for the app to run
227
228
  jwt_token: JWT token for authentication (optional)
228
229
 
229
230
  Returns:
@@ -258,16 +259,12 @@ class AppDeployHandler(BaseWebSocketHandler):
258
259
  # Step 2: Create a zip file of the app.
259
260
  temp_zip_path = None
260
261
  try:
261
- # Create temp file and close it before writing to avoid file handle conflicts
262
- with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_zip:
262
+ # Create temp file
263
+ with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_zip:
263
264
  temp_zip_path = temp_zip.name
264
265
 
265
266
  self.log.info("Zipping application files...")
266
- with zipfile.ZipFile(temp_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
267
- for root, _, files in os.walk(app_path):
268
- for file in files:
269
- file_path = os.path.join(root, file)
270
- zipf.write(file_path, arcname=os.path.relpath(file_path, app_path))
267
+ add_files_to_zip(temp_zip_path, app_path, files_to_upload, self.log)
271
268
 
272
269
  upload_response = await self._upload_app_to_s3(temp_zip_path, presigned_url)
273
270
  except Exception as e:
@@ -3,7 +3,7 @@
3
3
 
4
4
  from dataclasses import dataclass
5
5
  from enum import Enum
6
- from typing import Literal, Optional
6
+ from typing import Literal, Optional, List
7
7
 
8
8
 
9
9
  class MessageType(str, Enum):
@@ -66,6 +66,9 @@ class DeployAppRequest:
66
66
 
67
67
  # Path to the app file.
68
68
  notebook_path: str
69
+
70
+ # Files to be uploaded for the app to run
71
+ selected_files: List[str]
69
72
 
70
73
  # JWT token for authorization.
71
74
  jwt_token: Optional[str] = None
@@ -14,6 +14,7 @@ import tornado.web
14
14
  from jupyter_core.utils import ensure_async
15
15
  from jupyter_server.base.handlers import JupyterHandler
16
16
  from tornado.websocket import WebSocketHandler
17
+ from openai.types.chat import ChatCompletionMessageParam
17
18
  from mito_ai.completions.message_history import GlobalMessageHistory
18
19
  from mito_ai.logger import get_logger
19
20
  from mito_ai.completions.models import (
@@ -222,7 +223,32 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
222
223
  )
223
224
  self.reply(reply)
224
225
  return
225
-
226
+
227
+ if type == MessageType.STOP_AGENT:
228
+ thread_id_to_stop = metadata_dict.get('threadId')
229
+ if thread_id_to_stop:
230
+ self.log.info(f"Stopping agent, thread ID: {thread_id_to_stop}")
231
+
232
+ ai_optimized_message: ChatCompletionMessageParam = {
233
+ "role": "assistant",
234
+ "content": "The user made the following request: Stop processing my last request. I want to change it. Please answer my future requests without going back and finising my previous request."
235
+ }
236
+ display_optimized_message: ChatCompletionMessageParam = {
237
+ "role": "assistant",
238
+ "content": "Agent interupted by user "
239
+ }
240
+
241
+ await message_history.append_message(
242
+ ai_optimized_message=ai_optimized_message,
243
+ display_message=display_optimized_message,
244
+ model=self._selected_model,
245
+ llm_provider=self._llm,
246
+ thread_id=thread_id_to_stop
247
+ )
248
+ else:
249
+ self.log.info("Trying to stop agent, but no thread ID available")
250
+ return
251
+
226
252
  try:
227
253
  # Get completion based on message type
228
254
  completion = None
@@ -64,6 +64,7 @@ class MessageType(Enum):
64
64
  DELETE_THREAD = "delete_thread"
65
65
  UPDATE_MODEL_CONFIG = "update_model_config"
66
66
  STREAMLIT_CONVERSION = "streamlit_conversion"
67
+ STOP_AGENT = "stop_agent"
67
68
 
68
69
 
69
70
  @dataclass(frozen=True)
@@ -125,15 +125,33 @@ If the user has requested data that you believe is stored in the database:
125
125
  connections[connection_name]["username"]
126
126
  ```
127
127
 
128
+ - The user may colloquially ask for a "list of x", always assume they want a pandas DataFrame.
129
+ - When working with dataframes created from an SQL query, ALWAYS use lowercase column names.
130
+ - If you think the requested data is stored in the database, but you are unsure, then ask the user for clarification.
131
+
132
+ ## Additional MSSQL Rules
133
+
134
+ - When connecting to a Microsoft SQL Server (MSSQL) database, use the following format:
135
+
136
+ ```
137
+ import urllib.parse
138
+
139
+ encoded_password = urllib.parse.quote_plus(password)
140
+ conn_str = f"mssql+pyodbc://username:encoded_password@host:port/database?driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
141
+ ```
142
+
143
+ - Always URL-encode passwords for MSSQL connections to handle special characters properly.
144
+ - Include the port number in MSSQL connection strings.
145
+ - Use "ODBC+Driver+18+for+SQL+Server" (with plus signs) in the driver parameter.
146
+ - Always include "TrustServerCertificate=yes" for MSSQL connections to avoid SSL certificate issues.
147
+
148
+ ## Additional Oracle Rules
149
+
128
150
  - When connecting to an Oracle database, use the following format:
129
151
  ```
130
152
  conn_str = f"oracle+oracledb://username:password@host:port?service_name=service_name"
131
153
  ```
132
154
 
133
- - The user may colloquially ask for a "list of x", always assume they want a pandas DataFrame.
134
- - When working with dataframes created from an SQL query, ALWAYS use lowercase column names.
135
- - If you think the requested data is stored in the database, but you are unsure, then ask the user for clarification.
136
-
137
155
  Here is the schema:
138
156
  {schemas}
139
157
  """
@@ -52,18 +52,19 @@ class StreamlitCodeGeneration:
52
52
  async def generate_streamlit_code(self, notebook: dict) -> str:
53
53
  """Send a query to the agent, get its response and parse the code"""
54
54
 
55
+ prompt_text = get_streamlit_app_creation_prompt(notebook)
56
+
55
57
  messages: List[MessageParam] = [
56
58
  cast(MessageParam, {
57
59
  "role": "user",
58
60
  "content": [{
59
61
  "type": "text",
60
- "text": get_streamlit_app_creation_prompt(notebook)
62
+ "text": prompt_text
61
63
  }]
62
64
  })
63
65
  ]
64
66
 
65
67
  agent_response = await self.get_response_from_agent(messages)
66
-
67
68
  converted_code = extract_code_blocks(agent_response)
68
69
 
69
70
  # Extract the TODOs from the agent's response
@@ -123,6 +124,7 @@ async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], st
123
124
 
124
125
  notebook_code = parse_jupyter_notebook_to_extract_required_content(notebook_path)
125
126
  streamlit_code_generator = StreamlitCodeGeneration()
127
+
126
128
  streamlit_code = await streamlit_code_generator.generate_streamlit_code(notebook_code)
127
129
 
128
130
  has_validation_error, errors = validate_app(streamlit_code, notebook_path)
@@ -149,6 +151,7 @@ async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], st
149
151
  absolute_notebook_path = os.path.join(os.getcwd(), notebook_path)
150
152
 
151
153
  app_directory = os.path.dirname(absolute_notebook_path)
154
+
152
155
  success_flag, app_path, message = create_app_file(app_directory, streamlit_code)
153
156
 
154
157
  if not success_flag:
@@ -25,7 +25,8 @@ def extract_code_blocks(message_content: str) -> str:
25
25
  matches = re.findall(pattern, message_content, re.DOTALL)
26
26
 
27
27
  # Concatenate with single newlines
28
- return '\n'.join(matches)
28
+ result = '\n'.join(matches)
29
+ return result
29
30
 
30
31
  def extract_unified_diff_blocks(message_content: str) -> str:
31
32
  """
@@ -53,8 +54,10 @@ def create_app_file(app_directory: str, code: str) -> Tuple[bool, str, str]:
53
54
  """
54
55
  try:
55
56
  app_path = os.path.join(app_directory, "app.py")
56
- with open(app_path, 'w') as f:
57
+
58
+ with open(app_path, 'w', encoding='utf-8') as f:
57
59
  f.write(code)
60
+
58
61
  return True, app_path, f"Successfully created {app_directory}"
59
62
  except IOError as e:
60
63
  return False, '', f"Error creating file: {str(e)}"
@@ -139,8 +142,3 @@ def clean_directory_check(notebook_path: str) -> None:
139
142
 
140
143
  if not dir_path.exists():
141
144
  raise ValueError(f"Directory does not exist: {dir_path}")
142
-
143
- file_count = len([f for f in dir_path.iterdir() if f.is_file()])
144
- if file_count > 10:
145
- raise ValueError(
146
- f"Too many files in directory: 10 allowed but {file_count} present. Create a new directory and retry")
@@ -25,7 +25,7 @@ warnings.filterwarnings("ignore", message=".*bare mode.*")
25
25
 
26
26
  class StreamlitValidator:
27
27
  def __init__(self, port: int = 8501) -> None:
28
- self.temp_dir: Optional[str] = None
28
+ pass
29
29
 
30
30
  def get_syntax_error(self, app_code: str) -> Optional[str]:
31
31
  """Check if the Python code has valid syntax"""
@@ -58,26 +58,40 @@ class StreamlitValidator:
58
58
  os.chdir(original_cwd)
59
59
 
60
60
  with change_working_directory(directory):
61
- app_test = AppTest.from_string(app_code, default_timeout=30)
62
- app_test.run()
63
-
64
- # Check for exceptions
65
- if app_test.exception:
66
- errors = [{'type': 'exception', 'details': exc.value, 'message': exc.message, 'stack_trace': exc.stack_trace} for exc in app_test.exception]
67
- return errors
68
-
69
- # Check for error messages
70
- if app_test.error:
71
- errors = [{'type': 'error', 'details': err.value} for err in app_test.error]
72
- return errors
73
-
74
- return None
61
+ # Create a temporary file that uses UTF-8 encoding so
62
+ # we don't run into issues with non-ASCII characters on Windows.
63
+ # We use utf-8 encoding when writing the app.py file so this validation
64
+ # code mirrors the actual file.
65
+
66
+ # Note: Since the AppTest.from_file tries to open the file, we need to first close the file
67
+ # by exiting the context manager and using the delete=False flag so that the file still exists.
68
+ # Windows can't open the same file twice at the same time. We cleanup at the end.
69
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False, encoding="utf-8") as f:
70
+ f.write(app_code)
71
+ temp_path = f.name
75
72
 
76
- def cleanup(self) -> None:
77
- """Clean up the temporary files"""
78
- if self.temp_dir and os.path.exists(self.temp_dir):
79
- shutil.rmtree(self.temp_dir)
80
- self.temp_dir = None
73
+ try:
74
+ # Run Streamlit test from file with UTF-8 encoding
75
+ app_test = AppTest.from_file(temp_path, default_timeout=30)
76
+ app_test.run()
77
+
78
+ # Check for exceptions
79
+ if app_test.exception:
80
+ errors = [{'type': 'exception', 'details': exc.value, 'message': exc.message, 'stack_trace': exc.stack_trace} for exc in app_test.exception]
81
+ return errors
82
+
83
+ # Check for error messages
84
+ if app_test.error:
85
+ errors = [{'type': 'error', 'details': err.value} for err in app_test.error]
86
+ return errors
87
+
88
+ return None
89
+ finally:
90
+ # Clean up the temporary file
91
+ try:
92
+ os.unlink(temp_path)
93
+ except OSError:
94
+ pass # File might already be deleted
81
95
 
82
96
  def _validate_app(self, app_code: str, app_path: str) -> List[Dict[str, Any]]:
83
97
  """Complete validation pipeline"""
@@ -91,17 +105,12 @@ class StreamlitValidator:
91
105
 
92
106
  runtime_errors = self.get_runtime_errors(app_code, app_path)
93
107
 
94
- print('Found Runtime Errors', runtime_errors)
95
-
96
108
  if runtime_errors:
97
109
  errors.extend(runtime_errors)
98
110
 
99
111
  except Exception as e:
100
112
  errors.append({'type': 'validation', 'details': str(e)})
101
113
 
102
- finally:
103
- self.cleanup()
104
-
105
114
  return errors
106
115
 
107
116
  def validate_app(app_code: str, notebook_path: str) -> Tuple[bool, List[str]]:
@@ -88,9 +88,12 @@ class StreamlitPreviewHandler(APIHandler):
88
88
  self.finish({"error": error_msg})
89
89
  return
90
90
 
91
+
91
92
  # Ensure app exists
92
93
  resolved_notebook_path = self._resolve_notebook_path(notebook_path)
94
+
93
95
  success, error_msg = await ensure_app_exists(resolved_notebook_path, force_recreate)
96
+
94
97
  if not success:
95
98
  self.set_status(500)
96
99
  self.finish({"error": error_msg})
@@ -0,0 +1,71 @@
1
+ # Copyright (c) Saga Inc.
2
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
3
+
4
+ import zipfile
5
+ import logging
6
+ from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip
7
+
8
+ class TestAddFilesToZip:
9
+ """Test cases for add_files_to_zip helper function"""
10
+
11
+ def test_files_added_correctly(self, tmp_path):
12
+ """Ensure individual files are added correctly to the zip"""
13
+ # Create files
14
+ f1 = tmp_path / "file1.txt"
15
+ f1.write_text("file1 content")
16
+ f2 = tmp_path / "file2.txt"
17
+ f2.write_text("file2 content")
18
+
19
+ zip_path = tmp_path / "test.zip"
20
+ add_files_to_zip(str(zip_path), str(tmp_path), ["file1.txt", "file2.txt"])
21
+
22
+ with zipfile.ZipFile(zip_path, "r") as zf:
23
+ names = zf.namelist()
24
+ assert "file1.txt" in names
25
+ assert "file2.txt" in names
26
+ assert len(names) == 2
27
+
28
+ def test_directories_added_recursively(self, tmp_path):
29
+ """Ensure directories are added recursively with correct relative paths"""
30
+ nested = tmp_path / "folder"
31
+ nested.mkdir()
32
+ (nested / "nested1.txt").write_text("nested1 content")
33
+ subfolder = nested / "sub"
34
+ subfolder.mkdir()
35
+ (subfolder / "nested2.txt").write_text("nested2 content")
36
+
37
+ zip_path = tmp_path / "test.zip"
38
+ add_files_to_zip(str(zip_path), str(tmp_path), ["folder"])
39
+
40
+ with zipfile.ZipFile(zip_path, "r") as zf:
41
+ names = zf.namelist()
42
+ assert "folder/nested1.txt" in names
43
+ assert "folder/sub/nested2.txt" in names
44
+
45
+ def test_missing_files_skipped(self, tmp_path, caplog):
46
+ """Ensure missing files do not break the function and warning is logged"""
47
+ caplog.set_level(logging.WARNING)
48
+ zip_path = tmp_path / "test.zip"
49
+ add_files_to_zip(str(zip_path), str(tmp_path), ["does_not_exist.txt"], logger=logging.getLogger())
50
+
51
+ # Zip should exist but be empty
52
+ with zipfile.ZipFile(zip_path, "r") as zf:
53
+ assert zf.namelist() == []
54
+
55
+ # Check warning was logged
56
+ assert any("Skipping missing file" in record.message for record in caplog.records)
57
+
58
+ def test_arcname_paths_correct(self, tmp_path):
59
+ """Ensure arcname paths inside zip preserve relative paths to base_path"""
60
+ (tmp_path / "file.txt").write_text("content")
61
+ folder = tmp_path / "folder"
62
+ folder.mkdir()
63
+ (folder / "nested.txt").write_text("nested content")
64
+
65
+ zip_path = tmp_path / "test.zip"
66
+ add_files_to_zip(str(zip_path), str(tmp_path), ["file.txt", "folder"])
67
+
68
+ with zipfile.ZipFile(zip_path, "r") as zf:
69
+ names = zf.namelist()
70
+ assert "file.txt" in names
71
+ assert "folder/nested.txt" in names
@@ -53,7 +53,7 @@ def test_no_system_instructions_only_content():
53
53
  ]
54
54
  system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
55
55
 
56
- assert isinstance(system_prompt, anthropic.NotGiven)
56
+ assert isinstance(system_prompt, anthropic.Omit)
57
57
  assert len(anthropic_messages) == 2
58
58
  assert anthropic_messages[0]["role"] == "user"
59
59
  assert anthropic_messages[0]["content"] == "Hello!"
@@ -93,7 +93,7 @@ def test_empty_message_content():
93
93
  ]
94
94
  system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
95
95
 
96
- assert isinstance(system_prompt, anthropic.NotGiven)
96
+ assert isinstance(system_prompt, anthropic.Omit)
97
97
  assert len(anthropic_messages) == 1 # Should skip the message with missing content
98
98
  assert anthropic_messages[0]["role"] == "assistant"
99
99
  assert anthropic_messages[0]["content"] == "Hi!"
@@ -265,89 +265,5 @@ class TestStreamlitHandler:
265
265
  with pytest.raises(Exception, match="Generation failed"):
266
266
  await streamlit_handler("/path/to/notebook.ipynb")
267
267
 
268
- @pytest.mark.asyncio
269
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.parse_jupyter_notebook_to_extract_required_content')
270
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.StreamlitCodeGeneration')
271
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.validate_app')
272
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.create_app_file')
273
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.clean_directory_check')
274
- async def test_streamlit_handler_too_many_files_in_directory(self, mock_clean_directory, mock_create_file, mock_validator, mock_generator_class, mock_parse):
275
- """Test streamlit handler when there are too many files in the directory"""
276
- # Mock clean directory check to raise ValueError (simulating >10 files)
277
- mock_clean_directory.side_effect = ValueError("Too many files in directory: 10 allowed but 15 present. Create a new directory and retry")
278
-
279
- # The function should raise the ValueError before any other processing
280
- with pytest.raises(ValueError, match="Too many files in directory: 10 allowed but 15 present. Create a new directory and retry"):
281
- await streamlit_handler("/path/to/notebook.ipynb")
282
-
283
- # Verify that clean_directory_check was called
284
- mock_clean_directory.assert_called_once_with("/path/to/notebook.ipynb")
285
-
286
- # Verify that no other functions were called since the error occurred early
287
- mock_parse.assert_not_called()
288
- mock_generator_class.assert_not_called()
289
- mock_validator.assert_not_called()
290
- mock_create_file.assert_not_called()
291
268
 
292
269
 
293
- class TestCleanDirectoryCheck:
294
- """Test cases for clean_directory_check function"""
295
-
296
- @patch('mito_ai.streamlit_conversion.streamlit_utils.Path')
297
- def test_clean_directory_check_under_limit(self, mock_path):
298
- """Test clean_directory_check when directory has 10 or fewer files"""
299
- # Mock the Path class and its methods
300
- mock_path_instance = mock_path.return_value
301
- mock_path_instance.resolve.return_value = mock_path_instance
302
- mock_path_instance.parent = mock_path_instance
303
-
304
- # Mock directory existence check
305
- mock_path_instance.exists.return_value = True
306
-
307
- # Mock directory contents with 8 files
308
- mock_files = []
309
- for i in range(8):
310
- mock_file = MagicMock()
311
- mock_file.is_file.return_value = True
312
- mock_files.append(mock_file)
313
-
314
- mock_path_instance.iterdir.return_value = mock_files
315
-
316
- # Should not raise any exception
317
- clean_directory_check('/path/to/notebook.ipynb')
318
-
319
- # Verify calls
320
- mock_path.assert_called_once_with('/path/to/notebook.ipynb')
321
- mock_path_instance.resolve.assert_called_once()
322
- mock_path_instance.exists.assert_called_once()
323
- mock_path_instance.iterdir.assert_called_once()
324
-
325
- @patch('mito_ai.streamlit_conversion.streamlit_utils.Path')
326
- def test_clean_directory_check_over_limit(self, mock_path):
327
- """Test clean_directory_check when directory has more than 10 files"""
328
- # Mock the Path class and its methods
329
- mock_path_instance = mock_path.return_value
330
- mock_path_instance.resolve.return_value = mock_path_instance
331
- mock_path_instance.parent = mock_path_instance
332
-
333
- # Mock directory existence check
334
- mock_path_instance.exists.return_value = True
335
-
336
- # Mock directory contents with 15 files
337
- mock_files = []
338
- for i in range(15):
339
- mock_file = MagicMock()
340
- mock_file.is_file.return_value = True
341
- mock_files.append(mock_file)
342
-
343
- mock_path_instance.iterdir.return_value = mock_files
344
-
345
- # Should raise ValueError
346
- with pytest.raises(ValueError, match="Too many files in directory: 10 allowed but 15 present. Create a new directory and retry"):
347
- clean_directory_check('/path/to/notebook.ipynb')
348
-
349
- # Verify calls
350
- mock_path.assert_called_once_with('/path/to/notebook.ipynb')
351
- mock_path_instance.resolve.assert_called_once()
352
- mock_path_instance.exists.assert_called_once()
353
- mock_path_instance.iterdir.assert_called_once()
@@ -87,21 +87,6 @@ df=pd.read_csv('data.csv')
87
87
  validator = StreamlitValidator()
88
88
  errors = validator.get_runtime_errors(app_code, app_path)
89
89
  assert errors is None
90
-
91
-
92
- @patch('subprocess.Popen')
93
- def test_cleanup_with_process(self, mock_popen):
94
- """Test cleanup with running process"""
95
- validator = StreamlitValidator()
96
- validator.temp_dir = "/tmp/test_dir"
97
-
98
- # Mock directory exists
99
- with patch('os.path.exists', return_value=True):
100
- with patch('shutil.rmtree') as mock_rmtree:
101
- validator.cleanup()
102
-
103
- mock_rmtree.assert_called_once()
104
-
105
90
 
106
91
  @pytest.mark.parametrize("app_code,expected_has_validation_error,expected_error_message", [
107
92
  ("x=5", False, ""),
@@ -35,7 +35,7 @@ def test_basic_request_preparation():
35
35
  max_tokens = 100
36
36
  temperature = 0.7
37
37
  # Use NotGiven to ensure system is not included in inner_data
38
- system = anthropic.NotGiven()
38
+ system = anthropic.Omit()
39
39
  messages: List[MessageParam] = [{"role": "user", "content": "Hello"}]
40
40
  message_type = MessageType.CHAT
41
41
 
@@ -106,7 +106,7 @@ def test_tools_and_tool_choice():
106
106
  model="claude-3-sonnet",
107
107
  max_tokens=100,
108
108
  temperature=0.7,
109
- system=anthropic.NotGiven(),
109
+ system=anthropic.Omit(),
110
110
  messages=[{"role": "user", "content": "Hello"}],
111
111
  message_type=MessageType.CHAT,
112
112
  tools=tools,
@@ -124,7 +124,7 @@ def test_stream_parameter():
124
124
  model="claude-3-sonnet",
125
125
  max_tokens=100,
126
126
  temperature=0.7,
127
- system=anthropic.NotGiven(),
127
+ system=anthropic.Omit(),
128
128
  messages=[{"role": "user", "content": "Hello"}],
129
129
  message_type=MessageType.CHAT,
130
130
  tools=None,
@@ -150,7 +150,7 @@ def test_missing_user_info(monkeypatch):
150
150
  model="claude-3-sonnet",
151
151
  max_tokens=100,
152
152
  temperature=0.7,
153
- system=anthropic.NotGiven(),
153
+ system=anthropic.Omit(),
154
154
  messages=[{"role": "user", "content": "Hello"}],
155
155
  message_type=MessageType.CHAT,
156
156
  tools=None,