mito-ai 0.1.43__py3-none-any.whl → 0.1.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mito-ai might be problematic.

Files changed (62)
  1. mito_ai/__init__.py +3 -3
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +2 -3
  4. mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
  5. mito_ai/app_deploy/app_deploy_utils.py +25 -0
  6. mito_ai/{app_builder → app_deploy}/handlers.py +48 -40
  7. mito_ai/{app_builder → app_deploy}/models.py +17 -14
  8. mito_ai/app_manager/handlers.py +33 -0
  9. mito_ai/app_manager/models.py +15 -1
  10. mito_ai/completions/handlers.py +40 -1
  11. mito_ai/completions/models.py +5 -1
  12. mito_ai/completions/prompt_builders/agent_system_message.py +6 -4
  13. mito_ai/completions/prompt_builders/prompt_constants.py +22 -4
  14. mito_ai/completions/providers.py +5 -11
  15. mito_ai/streamlit_conversion/streamlit_agent_handler.py +6 -3
  16. mito_ai/streamlit_conversion/streamlit_utils.py +15 -7
  17. mito_ai/streamlit_conversion/validate_streamlit_app.py +34 -25
  18. mito_ai/streamlit_preview/handlers.py +49 -70
  19. mito_ai/streamlit_preview/utils.py +41 -0
  20. mito_ai/tests/deploy_app/test_app_deploy_utils.py +71 -0
  21. mito_ai/tests/providers/test_anthropic_client.py +2 -2
  22. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +0 -84
  23. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +0 -15
  24. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +88 -0
  25. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +4 -1
  26. mito_ai/tests/utils/test_anthropic_utils.py +4 -4
  27. mito_ai/utils/anthropic_utils.py +11 -19
  28. mito_ai/utils/telemetry_utils.py +15 -5
  29. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +100 -100
  30. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  31. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  32. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -5
  33. mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.81703ac2bc645e5c2fc2.js → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js +1729 -790
  34. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js.map +1 -0
  35. mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.502aef26f0416fab7435.js → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js +17 -17
  36. mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.502aef26f0416fab7435.js.map → mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js.map +1 -1
  37. {mito_ai-0.1.43.dist-info → mito_ai-0.1.45.dist-info}/METADATA +2 -2
  38. {mito_ai-0.1.43.dist-info → mito_ai-0.1.45.dist-info}/RECORD +61 -57
  39. mito_ai-0.1.43.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.81703ac2bc645e5c2fc2.js.map +0 -1
  40. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  41. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  42. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  43. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  44. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  45. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  46. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  47. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  48. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  49. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  50. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  51. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  52. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  53. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  54. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  55. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  56. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  57. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  58. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  59. {mito_ai-0.1.43.data → mito_ai-0.1.45.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  60. {mito_ai-0.1.43.dist-info → mito_ai-0.1.45.dist-info}/WHEEL +0 -0
  61. {mito_ai-0.1.43.dist-info → mito_ai-0.1.45.dist-info}/entry_points.txt +0 -0
  62. {mito_ai-0.1.43.dist-info → mito_ai-0.1.45.dist-info}/licenses/LICENSE +0 -0
@@ -160,7 +160,7 @@ This attribute is observed by the websocket provider to push the error to the cl
  # If we hit a free tier limit, then raise an exception right away without retrying.
  self.log.exception(f"Error during request_completions: {e}")
  self.last_error = CompletionError.from_exception(e)
- log_ai_completion_error('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', message_type, e)
+ log_ai_completion_error('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', thread_id or "", message_type, e)
  raise

  except BaseException as e:
@@ -169,14 +169,14 @@ This attribute is observed by the websocket provider to push the error to the cl
  # Exponential backoff: wait 2^attempt seconds
  wait_time = 2 ** attempt
  self.log.info(f"Retrying request_completions after {wait_time}s (attempt {attempt + 1}/{max_retries + 1}): {str(e)}")
- log_ai_completion_retry('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', message_type, e)
+ log_ai_completion_retry('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', thread_id or "", message_type, e)
  await asyncio.sleep(wait_time)
  continue
  else:
  # Final failure after all retries - set error state and raise
  self.log.exception(f"Error during request_completions after {attempt + 1} attempts: {e}")
  self.last_error = CompletionError.from_exception(e)
- log_ai_completion_error('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', message_type, e)
+ log_ai_completion_error('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', thread_id or "", message_type, e)
  raise

  # This should never be reached due to the raise in the except block,
@@ -264,14 +264,8 @@ This attribute is observed by the websocket provider to push the error to the cl
  except BaseException as e:
  self.log.exception(f"Error during stream_completions: {e}")
  self.last_error = CompletionError.from_exception(e)
- log(
- MITO_AI_COMPLETION_ERROR,
- params={
- KEY_TYPE_PARAM: self.key_type,
- 'message_type': message_type.value,
- },
- error=e
- )
+ log_ai_completion_error('user_key' if self.key_type != MITO_SERVER_KEY else 'mito_server_key', thread_id, message_type, e)
+
  # Send error message to client before raising
  reply_fn(CompletionStreamChunk(
  parent_id=message_id,
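
The hunks above (apparently mito_ai/completions/providers.py, +5 -11) replace the raw log() call in the stream-completions error path with the shared log_ai_completion_error helper and pass the chat thread_id through to both it and log_ai_completion_retry. The matching helper changes live in mito_ai/utils/telemetry_utils.py (+15 -5), which is not shown in this diff view; the sketch below is only a guess at its shape, inferred from the call sites. log(), MITO_AI_COMPLETION_ERROR, and KEY_TYPE_PARAM appear in the removed code, but the thread_id parameter name, its param key, and the stubs are assumptions.

    # Hedged sketch, not the actual telemetry_utils.py: the real log(),
    # MITO_AI_COMPLETION_ERROR and KEY_TYPE_PARAM are stubbed so this runs standalone.
    from enum import Enum
    from typing import Any, Dict, Optional

    MITO_AI_COMPLETION_ERROR = 'mito_ai_completion_error'  # stub value
    KEY_TYPE_PARAM = 'key_type'                            # stub value

    def log(event: str, params: Dict[str, Any], error: Optional[BaseException] = None) -> None:
        """Stand-in for the real telemetry sink."""
        print(event, params, error)

    def log_ai_completion_error(key_type: str, thread_id: str, message_type: Enum, error: BaseException) -> None:
        # Mirrors the new call sites: log_ai_completion_error(<key type>, thread_id or "", message_type, e)
        log(
            MITO_AI_COMPLETION_ERROR,
            params={
                KEY_TYPE_PARAM: key_type,
                'message_type': message_type.value,
                'thread_id': thread_id,  # assumed param key
            },
            error=error,
        )
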
@@ -4,7 +4,7 @@
  import logging
  import os
  from anthropic.types import MessageParam
- from typing import List, Optional, Tuple, cast, Union
+ from typing import List, Optional, Tuple, cast

  from mito_ai.logger import get_logger
  from mito_ai.streamlit_conversion.agent_utils import apply_patch_to_text, extract_todo_placeholders, fix_diff_headers
@@ -52,18 +52,19 @@ class StreamlitCodeGeneration:
  async def generate_streamlit_code(self, notebook: dict) -> str:
  """Send a query to the agent, get its response and parse the code"""

+ prompt_text = get_streamlit_app_creation_prompt(notebook)
+
  messages: List[MessageParam] = [
  cast(MessageParam, {
  "role": "user",
  "content": [{
  "type": "text",
- "text": get_streamlit_app_creation_prompt(notebook)
+ "text": prompt_text
  }]
  })
  ]

  agent_response = await self.get_response_from_agent(messages)
-
  converted_code = extract_code_blocks(agent_response)

  # Extract the TODOs from the agent's response
@@ -123,6 +124,7 @@ async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], st

  notebook_code = parse_jupyter_notebook_to_extract_required_content(notebook_path)
  streamlit_code_generator = StreamlitCodeGeneration()
+
  streamlit_code = await streamlit_code_generator.generate_streamlit_code(notebook_code)

  has_validation_error, errors = validate_app(streamlit_code, notebook_path)
@@ -149,6 +151,7 @@ async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], st
  absolute_notebook_path = os.path.join(os.getcwd(), notebook_path)

  app_directory = os.path.dirname(absolute_notebook_path)
+
  success_flag, app_path, message = create_app_file(app_directory, streamlit_code)

  if not success_flag:
@@ -25,7 +25,8 @@ def extract_code_blocks(message_content: str) -> str:
  matches = re.findall(pattern, message_content, re.DOTALL)

  # Concatenate with single newlines
- return '\n'.join(matches)
+ result = '\n'.join(matches)
+ return result

  def extract_unified_diff_blocks(message_content: str) -> str:
  """
@@ -53,14 +54,26 @@ def create_app_file(app_directory: str, code: str) -> Tuple[bool, str, str]:
  """
  try:
  app_path = os.path.join(app_directory, "app.py")
- with open(app_path, 'w') as f:
+
+ with open(app_path, 'w', encoding='utf-8') as f:
  f.write(code)
+
  return True, app_path, f"Successfully created {app_directory}"
  except IOError as e:
  return False, '', f"Error creating file: {str(e)}"
  except Exception as e:
  return False, '', f"Unexpected error: {str(e)}"
+

+ def get_app_path(app_directory: str) -> Optional[str]:
+ """
+ Check if the app.py file exists in the given directory.
+ """
+ app_path = os.path.join(app_directory, "app.py")
+ if not os.path.exists(app_path):
+ return None
+ return app_path
+

  def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
  """
@@ -129,8 +142,3 @@ def clean_directory_check(notebook_path: str) -> None:

  if not dir_path.exists():
  raise ValueError(f"Directory does not exist: {dir_path}")
-
- file_count = len([f for f in dir_path.iterdir() if f.is_file()])
- if file_count > 10:
- raise ValueError(
- f"Too many files in directory: 10 allowed but {file_count} present. Create a new directory and retry")
@@ -25,7 +25,7 @@ warnings.filterwarnings("ignore", message=".*bare mode.*")

  class StreamlitValidator:
  def __init__(self, port: int = 8501) -> None:
- self.temp_dir: Optional[str] = None
+ pass

  def get_syntax_error(self, app_code: str) -> Optional[str]:
  """Check if the Python code has valid syntax"""
@@ -58,26 +58,40 @@ class StreamlitValidator:
  os.chdir(original_cwd)

  with change_working_directory(directory):
- app_test = AppTest.from_string(app_code, default_timeout=30)
- app_test.run()
-
- # Check for exceptions
- if app_test.exception:
- errors = [{'type': 'exception', 'details': exc.value, 'message': exc.message, 'stack_trace': exc.stack_trace} for exc in app_test.exception]
- return errors
-
- # Check for error messages
- if app_test.error:
- errors = [{'type': 'error', 'details': err.value} for err in app_test.error]
- return errors
-
- return None
+ # Create a temporary file that uses UTF-8 encoding so
+ # we don't run into issues with non-ASCII characters on Windows.
+ # We use utf-8 encoding when writing the app.py file so this validation
+ # code mirrors the actual file.
+
+ # Note: Since the AppTest.from_file tries to open the file, we need to first close the file
+ # by exiting the context manager and using the delete=False flag so that the file still exists.
+ # Windows can't open the same file twice at the same time. We cleanup at the end.
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False, encoding="utf-8") as f:
+ f.write(app_code)
+ temp_path = f.name

- def cleanup(self) -> None:
- """Clean up the temporary files"""
- if self.temp_dir and os.path.exists(self.temp_dir):
- shutil.rmtree(self.temp_dir)
- self.temp_dir = None
+ try:
+ # Run Streamlit test from file with UTF-8 encoding
+ app_test = AppTest.from_file(temp_path, default_timeout=30)
+ app_test.run()
+
+ # Check for exceptions
+ if app_test.exception:
+ errors = [{'type': 'exception', 'details': exc.value, 'message': exc.message, 'stack_trace': exc.stack_trace} for exc in app_test.exception]
+ return errors
+
+ # Check for error messages
+ if app_test.error:
+ errors = [{'type': 'error', 'details': err.value} for err in app_test.error]
+ return errors
+
+ return None
+ finally:
+ # Clean up the temporary file
+ try:
+ os.unlink(temp_path)
+ except OSError:
+ pass # File might already be deleted

  def _validate_app(self, app_code: str, app_path: str) -> List[Dict[str, Any]]:
  """Complete validation pipeline"""
@@ -91,17 +105,12 @@ class StreamlitValidator:

  runtime_errors = self.get_runtime_errors(app_code, app_path)

- print('Found Runtime Errors', runtime_errors)
-
  if runtime_errors:
  errors.extend(runtime_errors)

  except Exception as e:
  errors.append({'type': 'validation', 'details': str(e)})

- finally:
- self.cleanup()
-
  return errors

  def validate_app(app_code: str, notebook_path: str) -> Tuple[bool, List[str]]:
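
The comments in the hunk above describe the Windows-friendly pattern the validator now uses: write the generated code to a closed, UTF-8 temp file (delete=False) before handing the path to AppTest.from_file, then unlink it afterwards. A minimal standalone sketch of that pattern, assuming streamlit is installed; the check_app_code name is made up for illustration:

    import os
    import tempfile
    from streamlit.testing.v1 import AppTest

    def check_app_code(app_code: str) -> list:
        """Run a Streamlit AppTest against code written to a closed, UTF-8 temp file."""
        # delete=False keeps the file around after the handle is closed; Windows
        # cannot open the same file from two handles at once, so AppTest.from_file
        # must receive an already-closed file.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False, encoding="utf-8") as f:
            f.write(app_code)
            temp_path = f.name
        try:
            app_test = AppTest.from_file(temp_path, default_timeout=30)
            app_test.run()
            # Collect any exception/error elements the run surfaced
            return list(app_test.exception) + list(app_test.error)
        finally:
            try:
                os.unlink(temp_path)
            except OSError:
                pass  # already removed
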
@@ -4,161 +4,140 @@
  import os
  import tempfile
  import uuid
+ from mito_ai.streamlit_conversion.streamlit_utils import get_app_path
+ from mito_ai.streamlit_preview.utils import ensure_app_exists, validate_request_body
  import tornado
  from jupyter_server.base.handlers import APIHandler
  from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
  from mito_ai.streamlit_preview.manager import get_preview_manager
  from mito_ai.utils.create import initialize_user
+ from typing import Tuple, Optional
+


  class StreamlitPreviewHandler(APIHandler):
  """REST handler for streamlit preview operations."""
-
+
  def initialize(self) -> None:
  """Initialize the handler."""
  self.preview_manager = get_preview_manager()
-
+
  def _resolve_notebook_path(self, notebook_path: str) -> str:
  """
  Resolve the notebook path to an absolute path that can be found by the backend.
-
+
  This method handles path resolution issues that can occur in different environments:
-
+
  1. **Test Environment**: Playwright tests create temporary directories with complex
  paths like 'mitoai_ui_tests-app_builde-ab3a5-n-Test-Preview-as-Streamlit-chromium/'
  that the backend can't directly access.
-
+
  2. **JupyterHub/Cloud Deployments**: In cloud environments, users may have notebooks
  in subdirectories that aren't immediately accessible from the server root.
-
+
  3. **Docker Containers**: When running in containers, the working directory and
  file paths may not align with what the frontend reports.
-
+
  4. **Multi-user Environments**: In enterprise deployments, users may have notebooks
  in user-specific directories that require path resolution.
-
+
  The method tries multiple strategies:
  1. If the path is already absolute, return it as-is
  2. Try to resolve relative to the Jupyter server's root directory
  3. Search recursively through subdirectories for a file with the same name
  4. Return the original path if not found (will cause a clear error message)
-
+
  Args:
  notebook_path (str): The notebook path from the frontend (may be relative or absolute)
-
+
  Returns:
  str: The resolved absolute path to the notebook file
  """
  # If the path is already absolute, return it
  if os.path.isabs(notebook_path):
  return notebook_path
-
+
  # Get the Jupyter server's root directory
- server_root = self.settings.get('server_root_dir', os.getcwd())
-
+ server_root = self.settings.get("server_root_dir", os.getcwd())
+
  # Try to find the notebook file in the server root
  resolved_path = os.path.join(server_root, notebook_path)
  if os.path.exists(resolved_path):
  return resolved_path
-
+
  # If not found, try to find it in subdirectories
  # This handles cases where the notebook is in a subdirectory that the frontend
  # doesn't know about, or where the path structure differs between frontend and backend
  for root, dirs, files in os.walk(server_root):
  if os.path.basename(notebook_path) in files:
  return os.path.join(root, os.path.basename(notebook_path))
-
+
  # If still not found, return the original path (will cause a clear error)
  # This ensures we get a meaningful error message rather than a generic "file not found"
  return os.path.join(os.getcwd(), notebook_path)

  @tornado.web.authenticated
  async def post(self) -> None:
- """Start a new streamlit preview.
-
- Expected JSON body:
- {
- "notebook_path": "path/to/notebook.ipynb"
- }
-
- Returns:
- {
- "id": "preview_id",
- "port": 8501,
- "url": "http://localhost:8501"
- }
- """
+ """Start a new streamlit preview."""
  try:
- # Parse request body
+ # Parse and validate request
  body = self.get_json_body()
- if body is None:
+ is_valid, error_msg, notebook_path, force_recreate = validate_request_body(body)
+ if not is_valid or notebook_path is None:
  self.set_status(400)
- self.finish({"error": 'Invalid or missing JSON body'})
+ self.finish({"error": error_msg})
  return

- notebook_path = body.get('notebook_path')

- if not notebook_path:
- self.set_status(400)
- self.finish({"error": 'Missing notebook_path parameter'})
- return
-
- # Resolve the notebook path to find the actual file
+ # Ensure app exists
  resolved_notebook_path = self._resolve_notebook_path(notebook_path)
-
- # Generate preview ID
- preview_id = str(uuid.uuid4())
-
- # Generate streamlit code using existing handler
- print('notebook_path', notebook_path)
- success, app_path, message = await streamlit_handler(resolved_notebook_path)
-
- if not success or app_path is None:
+
+ success, error_msg = await ensure_app_exists(resolved_notebook_path, force_recreate)
+
+ if not success:
  self.set_status(500)
- self.finish({"error": f'Failed to generate streamlit code: {message}'})
+ self.finish({"error": error_msg})
  return
-
- # Start streamlit preview
+
+ # Start preview
+ # TODO: There's a bug here where when the user rebuilds and already running app. Instead of
+ # creating a new process, we should update the existing process. The app displayed to the user
+ # does update, but that's just because of hot reloading when we overwrite the app.py file.
+ preview_id = str(uuid.uuid4())
  resolved_app_directory = os.path.dirname(resolved_notebook_path)
  success, message, port = self.preview_manager.start_streamlit_preview(resolved_app_directory, preview_id)
-
+
  if not success:
  self.set_status(500)
- self.finish({"error": f'Failed to start preview: {message}'})
+ self.finish({"error": f"Failed to start preview: {message}"})
  return
-
- # Return success response - APIHandler automatically handles JSON serialization
- self.finish({
- 'id': preview_id,
- 'port': port,
- 'url': f'http://localhost:{port}'
- })
-
+
+ # Return success response
+ self.finish({"id": preview_id, "port": port, "url": f"http://localhost:{port}"})
+
  except Exception as e:
  print(f"Error in streamlit preview handler: {e}")
  self.set_status(500)
-
- # Respond with the error
  self.finish({"error": str(e)})
-
+
  @tornado.web.authenticated
  def delete(self, preview_id: str) -> None:
  """Stop a streamlit preview."""
  try:
  if not preview_id:
  self.set_status(400)
- self.finish({"error": 'Missing preview_id parameter'})
+ self.finish({"error": "Missing preview_id parameter"})
  return
-
+
  # Stop the preview
  stopped = self.preview_manager.stop_preview(preview_id)
-
+
  if stopped:
  self.set_status(204) # No content
  else:
  self.set_status(404)
- self.finish({"error": f'Preview {preview_id} not found'})
-
+ self.finish({"error": f"Preview {preview_id} not found"})
+
  except Exception as e:
  self.set_status(500)
- self.finish({"error": f'Internal server error: {str(e)}'})
+ self.finish({"error": f"Internal server error: {str(e)}"})
@@ -0,0 +1,41 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from typing import Tuple, Optional
+ import os
+ from mito_ai.streamlit_conversion.streamlit_utils import get_app_path
+ from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
+
+
+ def validate_request_body(body: Optional[dict]) -> Tuple[bool, str, Optional[str], bool]:
+ """Validate the request body and extract notebook_path and force_recreate."""
+ if body is None:
+ return False, "Invalid or missing JSON body", None, False
+
+ notebook_path = body.get("notebook_path")
+ if not notebook_path:
+ return False, "Missing notebook_path parameter", None, False
+
+ force_recreate = body.get("force_recreate", False)
+ if not isinstance(force_recreate, bool):
+ return False, "force_recreate must be a boolean", None, False
+
+ return True, "", notebook_path, force_recreate
+
+ async def ensure_app_exists(resolved_notebook_path: str, force_recreate: bool = False) -> Tuple[bool, str]:
+ """Ensure app.py exists, generating it if necessary or if force_recreate is True."""
+ # Check if the app already exists
+ app_path = get_app_path(os.path.dirname(resolved_notebook_path))
+
+ if app_path is None or force_recreate:
+ if app_path is None:
+ print("[Mito AI] App path not found, generating streamlit code")
+ else:
+ print("[Mito AI] Force recreating streamlit app")
+
+ success, app_path, message = await streamlit_handler(resolved_notebook_path)
+
+ if not success or app_path is None:
+ return False, f"Failed to generate streamlit code: {message}"
+
+ return True, ""
@@ -0,0 +1,71 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import zipfile
+ import logging
+ from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip
+
+ class TestAddFilesToZip:
+ """Test cases for add_files_to_zip helper function"""
+
+ def test_files_added_correctly(self, tmp_path):
+ """Ensure individual files are added correctly to the zip"""
+ # Create files
+ f1 = tmp_path / "file1.txt"
+ f1.write_text("file1 content")
+ f2 = tmp_path / "file2.txt"
+ f2.write_text("file2 content")
+
+ zip_path = tmp_path / "test.zip"
+ add_files_to_zip(str(zip_path), str(tmp_path), ["file1.txt", "file2.txt"])
+
+ with zipfile.ZipFile(zip_path, "r") as zf:
+ names = zf.namelist()
+ assert "file1.txt" in names
+ assert "file2.txt" in names
+ assert len(names) == 2
+
+ def test_directories_added_recursively(self, tmp_path):
+ """Ensure directories are added recursively with correct relative paths"""
+ nested = tmp_path / "folder"
+ nested.mkdir()
+ (nested / "nested1.txt").write_text("nested1 content")
+ subfolder = nested / "sub"
+ subfolder.mkdir()
+ (subfolder / "nested2.txt").write_text("nested2 content")
+
+ zip_path = tmp_path / "test.zip"
+ add_files_to_zip(str(zip_path), str(tmp_path), ["folder"])
+
+ with zipfile.ZipFile(zip_path, "r") as zf:
+ names = zf.namelist()
+ assert "folder/nested1.txt" in names
+ assert "folder/sub/nested2.txt" in names
+
+ def test_missing_files_skipped(self, tmp_path, caplog):
+ """Ensure missing files do not break the function and warning is logged"""
+ caplog.set_level(logging.WARNING)
+ zip_path = tmp_path / "test.zip"
+ add_files_to_zip(str(zip_path), str(tmp_path), ["does_not_exist.txt"], logger=logging.getLogger())
+
+ # Zip should exist but be empty
+ with zipfile.ZipFile(zip_path, "r") as zf:
+ assert zf.namelist() == []
+
+ # Check warning was logged
+ assert any("Skipping missing file" in record.message for record in caplog.records)
+
+ def test_arcname_paths_correct(self, tmp_path):
+ """Ensure arcname paths inside zip preserve relative paths to base_path"""
+ (tmp_path / "file.txt").write_text("content")
+ folder = tmp_path / "folder"
+ folder.mkdir()
+ (folder / "nested.txt").write_text("nested content")
+
+ zip_path = tmp_path / "test.zip"
+ add_files_to_zip(str(zip_path), str(tmp_path), ["file.txt", "folder"])
+
+ with zipfile.ZipFile(zip_path, "r") as zf:
+ names = zf.namelist()
+ assert "file.txt" in names
+ assert "folder/nested.txt" in names
@@ -53,7 +53,7 @@ def test_no_system_instructions_only_content():
  ]
  system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)

- assert isinstance(system_prompt, anthropic.NotGiven)
+ assert isinstance(system_prompt, anthropic.Omit)
  assert len(anthropic_messages) == 2
  assert anthropic_messages[0]["role"] == "user"
  assert anthropic_messages[0]["content"] == "Hello!"
@@ -93,7 +93,7 @@ def test_empty_message_content():
  ]
  system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)

- assert isinstance(system_prompt, anthropic.NotGiven)
+ assert isinstance(system_prompt, anthropic.Omit)
  assert len(anthropic_messages) == 1 # Should skip the message with missing content
  assert anthropic_messages[0]["role"] == "assistant"
  assert anthropic_messages[0]["content"] == "Hi!"
@@ -265,89 +265,5 @@ class TestStreamlitHandler:
  with pytest.raises(Exception, match="Generation failed"):
  await streamlit_handler("/path/to/notebook.ipynb")

- @pytest.mark.asyncio
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.parse_jupyter_notebook_to_extract_required_content')
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.StreamlitCodeGeneration')
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.validate_app')
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.create_app_file')
- @patch('mito_ai.streamlit_conversion.streamlit_agent_handler.clean_directory_check')
- async def test_streamlit_handler_too_many_files_in_directory(self, mock_clean_directory, mock_create_file, mock_validator, mock_generator_class, mock_parse):
- """Test streamlit handler when there are too many files in the directory"""
- # Mock clean directory check to raise ValueError (simulating >10 files)
- mock_clean_directory.side_effect = ValueError("Too many files in directory: 10 allowed but 15 present. Create a new directory and retry")
-
- # The function should raise the ValueError before any other processing
- with pytest.raises(ValueError, match="Too many files in directory: 10 allowed but 15 present. Create a new directory and retry"):
- await streamlit_handler("/path/to/notebook.ipynb")
-
- # Verify that clean_directory_check was called
- mock_clean_directory.assert_called_once_with("/path/to/notebook.ipynb")
-
- # Verify that no other functions were called since the error occurred early
- mock_parse.assert_not_called()
- mock_generator_class.assert_not_called()
- mock_validator.assert_not_called()
- mock_create_file.assert_not_called()


- class TestCleanDirectoryCheck:
- """Test cases for clean_directory_check function"""
-
- @patch('mito_ai.streamlit_conversion.streamlit_utils.Path')
- def test_clean_directory_check_under_limit(self, mock_path):
- """Test clean_directory_check when directory has 10 or fewer files"""
- # Mock the Path class and its methods
- mock_path_instance = mock_path.return_value
- mock_path_instance.resolve.return_value = mock_path_instance
- mock_path_instance.parent = mock_path_instance
-
- # Mock directory existence check
- mock_path_instance.exists.return_value = True
-
- # Mock directory contents with 8 files
- mock_files = []
- for i in range(8):
- mock_file = MagicMock()
- mock_file.is_file.return_value = True
- mock_files.append(mock_file)
-
- mock_path_instance.iterdir.return_value = mock_files
-
- # Should not raise any exception
- clean_directory_check('/path/to/notebook.ipynb')
-
- # Verify calls
- mock_path.assert_called_once_with('/path/to/notebook.ipynb')
- mock_path_instance.resolve.assert_called_once()
- mock_path_instance.exists.assert_called_once()
- mock_path_instance.iterdir.assert_called_once()
-
- @patch('mito_ai.streamlit_conversion.streamlit_utils.Path')
- def test_clean_directory_check_over_limit(self, mock_path):
- """Test clean_directory_check when directory has more than 10 files"""
- # Mock the Path class and its methods
- mock_path_instance = mock_path.return_value
- mock_path_instance.resolve.return_value = mock_path_instance
- mock_path_instance.parent = mock_path_instance
-
- # Mock directory existence check
- mock_path_instance.exists.return_value = True
-
- # Mock directory contents with 15 files
- mock_files = []
- for i in range(15):
- mock_file = MagicMock()
- mock_file.is_file.return_value = True
- mock_files.append(mock_file)
-
- mock_path_instance.iterdir.return_value = mock_files
-
- # Should raise ValueError
- with pytest.raises(ValueError, match="Too many files in directory: 10 allowed but 15 present. Create a new directory and retry"):
- clean_directory_check('/path/to/notebook.ipynb')
-
- # Verify calls
- mock_path.assert_called_once_with('/path/to/notebook.ipynb')
- mock_path_instance.resolve.assert_called_once()
- mock_path_instance.exists.assert_called_once()
- mock_path_instance.iterdir.assert_called_once()