mito-ai 0.1.38__py3-none-any.whl → 0.1.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (47)
  1. mito_ai/__init__.py +8 -0
  2. mito_ai/_version.py +1 -1
  3. mito_ai/app_builder/handlers.py +16 -11
  4. mito_ai/completions/handlers.py +1 -1
  5. mito_ai/completions/prompt_builders/agent_system_message.py +18 -45
  6. mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
  7. mito_ai/openai_client.py +1 -1
  8. mito_ai/streamlit_conversion/agent_utils.py +116 -0
  9. mito_ai/streamlit_conversion/prompts/prompt_constants.py +59 -0
  10. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  11. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +45 -0
  12. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  13. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +44 -0
  14. mito_ai/streamlit_conversion/streamlit_agent_handler.py +72 -42
  15. mito_ai/streamlit_conversion/streamlit_system_prompt.py +19 -17
  16. mito_ai/streamlit_conversion/streamlit_utils.py +43 -5
  17. mito_ai/streamlit_conversion/validate_streamlit_app.py +116 -0
  18. mito_ai/streamlit_preview/handlers.py +7 -4
  19. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +153 -66
  20. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +119 -0
  21. mito_ai/tests/utils/test_anthropic_utils.py +2 -2
  22. mito_ai/utils/anthropic_utils.py +4 -4
  23. mito_ai/utils/open_ai_utils.py +0 -4
  24. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  25. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  26. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  27. mito_ai-0.1.38.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5d1d7c234e2dc7c9d97b.js → mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js +411 -78
  28. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js.map +1 -0
  29. mito_ai-0.1.38.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.bcce4ea34631acf6dbbe.js → mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js +3 -3
  30. mito_ai-0.1.38.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.bcce4ea34631acf6dbbe.js.map → mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js.map +1 -1
  31. {mito_ai-0.1.38.dist-info → mito_ai-0.1.40.dist-info}/METADATA +4 -1
  32. {mito_ai-0.1.38.dist-info → mito_ai-0.1.40.dist-info}/RECORD +44 -38
  33. mito_ai/streamlit_conversion/validate_and_run_streamlit_code.py +0 -208
  34. mito_ai/tests/streamlit_conversion/test_validate_and_run_streamlit_code.py +0 -418
  35. mito_ai-0.1.38.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.5d1d7c234e2dc7c9d97b.js.map +0 -1
  36. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  37. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  38. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  39. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  40. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  41. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  42. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  43. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  44. {mito_ai-0.1.38.data → mito_ai-0.1.40.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  45. {mito_ai-0.1.38.dist-info → mito_ai-0.1.40.dist-info}/WHEEL +0 -0
  46. {mito_ai-0.1.38.dist-info → mito_ai-0.1.40.dist-info}/entry_points.txt +0 -0
  47. {mito_ai-0.1.38.dist-info → mito_ai-0.1.40.dist-info}/licenses/LICENSE +0 -0
mito_ai/streamlit_conversion/streamlit_agent_handler.py

@@ -4,31 +4,24 @@
 import logging
 import os
 from anthropic.types import MessageParam
-from typing import List, Optional, Tuple, cast
+from typing import List, Optional, Tuple, cast, Union
 
 from mito_ai.logger import get_logger
+from mito_ai.streamlit_conversion.agent_utils import apply_patch_to_text, extract_todo_placeholders, fix_diff_headers
+from mito_ai.streamlit_conversion.prompts.streamlit_app_creation_prompt import get_streamlit_app_creation_prompt
+from mito_ai.streamlit_conversion.prompts.streamlit_error_correction_prompt import get_streamlit_error_correction_prompt
+from mito_ai.streamlit_conversion.prompts.streamlit_finish_todo_prompt import get_finish_todo_prompt
 from mito_ai.streamlit_conversion.streamlit_system_prompt import streamlit_system_prompt
-from mito_ai.streamlit_conversion.validate_and_run_streamlit_code import streamlit_code_validator
-from mito_ai.streamlit_conversion.streamlit_utils import extract_code_blocks, create_app_file, parse_jupyter_notebook_to_extract_required_content
+from mito_ai.streamlit_conversion.validate_streamlit_app import validate_app
+from mito_ai.streamlit_conversion.streamlit_utils import extract_code_blocks, create_app_file, extract_unified_diff_blocks, parse_jupyter_notebook_to_extract_required_content
 from mito_ai.utils.anthropic_utils import stream_anthropic_completion_from_mito_server
 from mito_ai.completions.models import MessageType
 from mito_ai.utils.telemetry_utils import log_streamlit_app_creation_error, log_streamlit_app_creation_retry, log_streamlit_app_creation_success
+from mito_ai.streamlit_conversion.streamlit_utils import clean_directory_check
 
 STREAMLIT_AI_MODEL = "claude-3-5-haiku-latest"
 
 class StreamlitCodeGeneration:
-    def __init__(self, notebook: dict) -> None:
-
-        self.messages: List[MessageParam] = [
-            cast(MessageParam, {
-                "role": "user",
-                "content": [{
-                    "type": "text",
-                    "text": f"Here is my jupyter notebook content that I want to convert into a Streamlit dashboard - {notebook}"
-                }]
-            })
-        ]
-
     @property
     def log(self) -> logging.Logger:
        """Use Mito AI logger."""
@@ -56,57 +49,89 @@ class StreamlitCodeGeneration:
             accumulated_response += stream_chunk
         return accumulated_response
 
-    def add_agent_response_to_context(self, agent_response: str) -> None:
-        """Add the agent's response to the history"""
-        self.messages.append(
+    async def generate_streamlit_code(self, notebook: dict) -> str:
+        """Send a query to the agent, get its response and parse the code"""
+
+        messages: List[MessageParam] = [
             cast(MessageParam, {
-                "role": "assistant",
+                "role": "user",
                 "content": [{
                     "type": "text",
-                    "text": agent_response
+                    "text": get_streamlit_app_creation_prompt(notebook)
                 }]
             })
-        )
-
-    async def generate_streamlit_code(self) -> str:
-        """Send a query to the agent, get its response and parse the code"""
-        agent_response = await self.get_response_from_agent(self.messages)
+        ]
+
+        agent_response = await self.get_response_from_agent(messages)
 
         converted_code = extract_code_blocks(agent_response)
-        self.add_agent_response_to_context(converted_code)
+
+        # Extract the TODOs from the agent's response
+        todo_placeholders = extract_todo_placeholders(agent_response)
+
+        for todo_placeholder in todo_placeholders:
+            print(f"Processing AI TODO: {todo_placeholder}")
+            todo_prompt = get_finish_todo_prompt(notebook, converted_code, todo_placeholder)
+            todo_messages: List[MessageParam] = [
+                cast(MessageParam, {
+                    "role": "user",
+                    "content": [{
+                        "type": "text",
+                        "text": todo_prompt
+                    }]
+                })
+            ]
+            todo_response = await self.get_response_from_agent(todo_messages)
+
+            # Apply the diff to the streamlit app
+            exctracted_diff = extract_unified_diff_blocks(todo_response)
+            fixed_diff = fix_diff_headers(exctracted_diff)
+            converted_code = apply_patch_to_text(converted_code, fixed_diff)
+
         return converted_code
 
 
-    async def correct_error_in_generation(self, error: str) -> str:
+    async def correct_error_in_generation(self, error: str, streamlit_app_code: str) -> str:
         """If errors are present, send it back to the agent to get corrections in code"""
-        self.messages.append(
+        messages: List[MessageParam] = [
             cast(MessageParam, {
                 "role": "user",
                 "content": [{
                     "type": "text",
-                    "text": f"When I run the streamlit app code, I get the following error: {error}\nPlease return the FULL Streamlit app code with the error corrected"
+                    "text": get_streamlit_error_correction_prompt(error, streamlit_app_code)
                 }]
             })
-        )
-        agent_response = await self.get_response_from_agent(self.messages)
-        converted_code = extract_code_blocks(agent_response)
-        self.add_agent_response_to_context(converted_code)
+        ]
+        agent_response = await self.get_response_from_agent(messages)
+
+        # Apply the diff to the streamlit app
+        exctracted_diff = extract_unified_diff_blocks(agent_response)
+
+        print(f"\n\nExtracted diff: {exctracted_diff}")
+        fixed_diff = fix_diff_headers(exctracted_diff)
+        streamlit_app_code = apply_patch_to_text(streamlit_app_code, fixed_diff)
+
+        print("\n\nUpdated app code: ", streamlit_app_code)
 
-        return converted_code
+        return streamlit_app_code
 
 
 async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], str]:
     """Handler function for streamlit code generation and validation"""
+
+    clean_directory_check(notebook_path)
+
     notebook_code = parse_jupyter_notebook_to_extract_required_content(notebook_path)
-    streamlit_code_generator = StreamlitCodeGeneration(notebook_code)
-    streamlit_code = await streamlit_code_generator.generate_streamlit_code()
-    has_validation_error, error = streamlit_code_validator(streamlit_code)
-
+    streamlit_code_generator = StreamlitCodeGeneration()
+    streamlit_code = await streamlit_code_generator.generate_streamlit_code(notebook_code)
 
+    has_validation_error, errors = validate_app(streamlit_code, notebook_path)
     tries = 0
     while has_validation_error and tries < 5:
-        streamlit_code = await streamlit_code_generator.correct_error_in_generation(error)
-        has_validation_error, error = streamlit_code_validator(streamlit_code)
+        for error in errors:
+            streamlit_code = await streamlit_code_generator.correct_error_in_generation(error, streamlit_code)
+
+        has_validation_error, errors = validate_app(streamlit_code, notebook_path)
 
     if has_validation_error:
         # TODO: We can't easily get the key type here, so for the beta release
@@ -116,9 +141,14 @@ async def streamlit_handler(notebook_path: str) -> Tuple[bool, Optional[str], str]:
 
     if has_validation_error:
         log_streamlit_app_creation_error('mito_server_key', MessageType.STREAMLIT_CONVERSION, error)
-        return False, None, "Error generating streamlit code by agent"
+        return False, '', "Error generating streamlit code by agent"
+
+    # Convert to absolute path for directory calculation
+    absolute_notebook_path = notebook_path
+    if not (notebook_path.startswith('/') or (len(notebook_path) > 1 and notebook_path[1] == ':')):
+        absolute_notebook_path = os.path.join(os.getcwd(), notebook_path)
 
-    app_directory = os.path.dirname(notebook_path)
+    app_directory = os.path.dirname(absolute_notebook_path)
     success_flag, app_path, message = create_app_file(app_directory, streamlit_code)
 
     if not success_flag:
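For orientation, a minimal sketch of how the reworked `streamlit_handler` entry point could be driven from an async context; the notebook path below is purely illustrative and assumes the mito-ai package (and its Mito server access) is installed and configured.

```python
# Illustrative driver only; "analysis/report.ipynb" is a placeholder path.
import asyncio
from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler

async def main() -> None:
    # Returns (success, app_path, message), matching the Tuple[bool, Optional[str], str] signature above.
    success, app_path, message = await streamlit_handler("analysis/report.ipynb")
    print(success, app_path, message)

asyncio.run(main())
```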
mito_ai/streamlit_conversion/streamlit_system_prompt.py

@@ -1,7 +1,7 @@
 # Copyright (c) Saga Inc.
 # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
-streamlit_system_prompt = """You are a senior data scientist and Streamlit expert specializing in converting Jupyter notebooks into professional dashboard applications.
+streamlit_system_prompt = """You are a code conversion specialist who converts Jupyter notebooks into Streamlit applications with ABSOLUTE FIDELITY.
 
 ROLE AND EXPERTISE:
 - Expert in Python, Jupyter notebooks, Streamlit, and data visualization
@@ -17,17 +17,25 @@ TASK REQUIREMENTS:
 STREAMLIT IMPLEMENTATION GUIDELINES:
 - Use appropriate Streamlit components (st.title, st.header, st.subheader, st.markdown, etc.)
 - Display all visualizations using st.pyplot(), st.plotly_chart(), or st.altair_chart() as appropriate
+- Do not convert database connections into Streamlit's secret.toml format. If the user inlined their database credentials, are importing from an environment variable, or reading from a connections file, assume that same approach will work in the streamlit app.
 - Show dataframes and tables using st.dataframe() or st.table()
 - Include all text explanations and insights from markdown cells
 - Add interactive elements where beneficial (filters, selectors, etc.)
 - Ensure professional styling and layout suitable for executives
 
-CODE STRUCTURE:
-- Generate a complete, runnable app.py file
-- Include all necessary imports
-- Handle data loading and processing
-- Organize content with clear sections and headers
-- Include comments explaining key sections
+CRITICAL REQUIREMENTS:
+1. **PRESERVE ALL CODE EXACTLY**: Every line of code, every data structure, every import must be included in full
+2. **NO PLACEHOLDERS**: Never use comments like "# Add more data here" or "# Fill in the rest"
+3. **NO SIMPLIFICATION**: Do not replace actual data with sample data or hardcoded examples
+4. **COMPLETE DATA STRUCTURES**: If a notebook has a 1000-line dictionary, include all 1000 lines
+5. **PRESERVE DATA LOADING**: If the notebook reads from files, the Streamlit app must read from the same files
+6. **NO IMPROVIZAITION**: Do not provide your own interpretations of the analysis. Just convert the existing analysis into a streamlit app.
+
+STYLE GUIDELINES:
+- Create a professional, executive-friendly dashboard
+- If there are variables in the notebook that the streamlit app viewer would likely want to configure, then use the appropriate streamlit component to allow them to do so. For examples, if the notebook has a variable called "start_date" and "end_date", then use the st.date_input component to allow the user to select the start and end dates.
+- Do not use emojis unless they are in the notebook already
+- Do not modify the graphs or analysis. If the notebook has a graph, use the same graph in the streamlit app.
 - Always include the following code at the top of the file so the user does not use the wrong deploy button
 ```python
 st.markdown(\"\"\"
@@ -41,13 +49,7 @@ st.markdown(\"\"\"
 ```
 
 OUTPUT FORMAT:
-- Provide the complete app.py file code
-- Ensure all notebook outputs are faithfully reproduced
-- Make the dashboard professional and presentation-ready
-- Focus on clarity and executive-level communication
-- Don't give extra explanations, just give the python code
-- Do NOT add emojis
-- Do NOT modify the graphs or analysis
-- Do NOT provide your own interpretations for the analysis
-
-Remember: The goal is to transform technical analysis into a polished, interactive/visually appealing dashboard that executives can easily understand and navigate."""
+- Output the complete, runnable app.py file.
+- Do not output any extra text, just give the python code.
+
+"""
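The new STYLE GUIDELINES ask the model to expose likely-configurable notebook variables through Streamlit widgets. A minimal sketch of the st.date_input pattern that guideline describes, with illustrative variable names (the filtering line is an assumption, not part of the prompt):

```python
# Sketch of the configurable-variable pattern referenced in the system prompt.
import datetime
import streamlit as st

start_date = st.date_input("Start date", value=datetime.date(2024, 1, 1))
end_date = st.date_input("End date", value=datetime.date(2024, 12, 31))

# Downstream analysis would then filter on the selected range, e.g.:
# df = df[(df["date"] >= pd.Timestamp(start_date)) & (df["date"] <= pd.Timestamp(end_date))]
```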
mito_ai/streamlit_conversion/streamlit_utils.py

@@ -5,6 +5,7 @@ import re
 import json
 import os
 from typing import Dict, Optional, Tuple, Any
+from pathlib import Path
 
 def extract_code_blocks(message_content: str) -> str:
     """
@@ -19,7 +20,6 @@ def extract_code_blocks(message_content: str) -> str:
     if "```python" not in message_content:
         return message_content
 
-    # return message_content.split('```python\n')[1].split('\n```')[0]
     # Use regex to find all Python code blocks
     pattern = r'```python\n(.*?)```'
     matches = re.findall(pattern, message_content, re.DOTALL)
@@ -27,8 +27,19 @@ def extract_code_blocks(message_content: str) -> str:
     # Concatenate with single newlines
     return '\n'.join(matches)
 
+def extract_unified_diff_blocks(message_content: str) -> str:
+    """
+    Extract all unified_diff blocks from Claude's response.
+    """
+    if "```unified_diff" not in message_content:
+        return message_content
+
+    pattern = r'```unified_diff\n(.*?)```'
+    matches = re.findall(pattern, message_content, re.DOTALL)
+    return '\n'.join(matches)
+
 
-def create_app_file(app_directory: str, code: str) -> Tuple[bool, Optional[str], str]:
+def create_app_file(app_directory: str, code: str) -> Tuple[bool, str, str]:
     """
     Create app.py file and write code to it with error handling
 
@@ -46,9 +57,9 @@ def create_app_file(app_directory: str, code: str) -> Tuple[bool, Optional[str], str]:
             f.write(code)
         return True, app_path, f"Successfully created {app_directory}"
     except IOError as e:
-        return False, None, f"Error creating file: {str(e)}"
+        return False, '', f"Error creating file: {str(e)}"
     except Exception as e:
-        return False, None, f"Unexpected error: {str(e)}"
+        return False, '', f"Unexpected error: {str(e)}"
 
 
 def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
@@ -56,7 +67,7 @@ def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
     Read a Jupyter notebook and filter cells to keep only cell_type and source fields.
 
     Args:
-        notebook_path (str): Absolute path to the .ipynb file
+        notebook_path (str): Path to the .ipynb file (can be relative or absolute)
 
     Returns:
         dict: Filtered notebook dictionary with only cell_type and source in cells
@@ -66,6 +77,11 @@ def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
         json.JSONDecodeError: If the file is not valid JSON
         KeyError: If the notebook doesn't have the expected structure
     """
+    # Convert to absolute path if it's not already absolute
+    # Handle both Unix-style absolute paths (starting with /) and Windows-style absolute paths
+    if not (notebook_path.startswith('/') or (len(notebook_path) > 1 and notebook_path[1] == ':')):
+        notebook_path = os.path.join(os.getcwd(), notebook_path)
+
     try:
         # Read the notebook file
         with open(notebook_path, 'r', encoding='utf-8') as f:
@@ -96,3 +112,25 @@ def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
         raise json.JSONDecodeError(f"Invalid JSON in notebook file: {str(e)}", e.doc if hasattr(e, 'doc') else '', e.pos if hasattr(e, 'pos') else 0)
     except Exception as e:
         raise Exception(f"Error processing notebook: {str(e)}")
+
+
+def resolve_notebook_path(notebook_path: str) -> str:
+    # Convert to absolute path if it's not already absolute
+    # Handle both Unix-style absolute paths (starting with /) and Windows-style absolute paths
+    if not (notebook_path.startswith('/') or (len(notebook_path) > 1 and notebook_path[1] == ':')):
+        notebook_path = os.path.join(os.getcwd(), notebook_path)
+    return notebook_path
+
+def clean_directory_check(notebook_path: str) -> None:
+    notebook_path = resolve_notebook_path(notebook_path)
+    # pathlib handles the cross OS path conversion automatically
+    path = Path(notebook_path).resolve()
+    dir_path = path.parent
+
+    if not dir_path.exists():
+        raise ValueError(f"Directory does not exist: {dir_path}")
+
+    file_count = len([f for f in dir_path.iterdir() if f.is_file()])
+    if file_count > 10:
+        raise ValueError(
+            f"Too many files in directory: 10 allowed but {file_count} present. Create a new directory and retry")
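The new `extract_unified_diff_blocks` helper mirrors `extract_code_blocks` but pulls `unified_diff` fences out of the model's reply. A small usage sketch, with a made-up response string (not taken from the package):

```python
# Illustrative only: a fabricated model response containing a unified_diff fence.
from mito_ai.streamlit_conversion.streamlit_utils import extract_unified_diff_blocks

response = (
    "Here is the fix:\n"
    "```unified_diff\n"
    "--- app.py\n"
    "+++ app.py\n"
    "@@ -1,1 +1,1 @@\n"
    "-st.title('Dashbord')\n"
    "+st.title('Dashboard')\n"
    "```\n"
)

diff_text = extract_unified_diff_blocks(response)
print(diff_text)  # Only the text between the unified_diff fences is returned
```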
mito_ai/streamlit_conversion/validate_streamlit_app.py (new file)

@@ -0,0 +1,116 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import sys
+import os
+import time
+import requests
+import tempfile
+import shutil
+import traceback
+import ast
+import importlib.util
+import warnings
+from typing import List, Tuple, Optional, Dict, Any, Generator
+from streamlit.testing.v1 import AppTest
+from contextlib import contextmanager
+from mito_ai.streamlit_conversion.streamlit_utils import resolve_notebook_path
+
+
+# warnings.filterwarnings("ignore", message=r".*missing ScriptRunContext.*")
+# warnings.filterwarnings("ignore", category=UserWarning)
+
+warnings.filterwarnings("ignore", message=".*bare mode.*")
+
+
+class StreamlitValidator:
+    def __init__(self, port: int = 8501) -> None:
+        self.temp_dir: Optional[str] = None
+
+    def get_syntax_error(self, app_code: str) -> Optional[str]:
+        """Check if the Python code has valid syntax"""
+        try:
+            ast.parse(app_code)
+            return None
+        except SyntaxError as e:
+            error_msg = ''.join(traceback.format_exception(type(e), e, e.__traceback__))
+            return error_msg
+
+    def get_runtime_errors(self, app_code: str, app_path: str) -> Optional[List[Dict[str, Any]]]:
+        """Start the Streamlit app in a subprocess"""
+
+        directory = os.path.dirname(app_path)
+
+        @contextmanager
+        def change_working_directory(path: str) -> Generator[None, Any, None]:
+            """
+            Context manager to temporarily change working directory
+            so that relative paths are still valid when we run the app
+            """
+            if path == '':
+                yield
+
+            original_cwd = os.getcwd()
+            try:
+                os.chdir(path)
+                yield
+            finally:
+                os.chdir(original_cwd)
+
+        with change_working_directory(directory):
+            app_test = AppTest.from_string(app_code, default_timeout=30)
+            app_test.run()
+
+        # Check for exceptions
+        if app_test.exception:
+            errors = [{'type': 'exception', 'details': exc.value, 'message': exc.message, 'stack_trace': exc.stack_trace} for exc in app_test.exception]
+            return errors
+
+        # Check for error messages
+        if app_test.error:
+            errors = [{'type': 'error', 'details': err.value} for err in app_test.error]
+            return errors
+
+        return None
+
+    def cleanup(self) -> None:
+        """Clean up the temporary files"""
+        if self.temp_dir and os.path.exists(self.temp_dir):
+            shutil.rmtree(self.temp_dir)
+            self.temp_dir = None
+
+    def _validate_app(self, app_code: str, app_path: str) -> List[Dict[str, Any]]:
+        """Complete validation pipeline"""
+        errors: List[Dict[str, Any]] = []
+
+        try:
+            # Step 1: Check syntax
+            syntax_error = self.get_syntax_error(app_code)
+            if syntax_error:
+                errors.append({'type': 'syntax', 'details': syntax_error})
+
+            runtime_errors = self.get_runtime_errors(app_code, app_path)
+
+            print('Found Runtime Errors', runtime_errors)
+
+            if runtime_errors:
+                errors.extend(runtime_errors)
+
+        except Exception as e:
+            errors.append({'type': 'validation', 'details': str(e)})
+
+        finally:
+            self.cleanup()
+
+        return errors
+
+def validate_app(app_code: str, notebook_path: str) -> Tuple[bool, List[str]]:
+    """Convenience function to validate Streamlit code"""
+    notebook_path = resolve_notebook_path(notebook_path)
+
+    validator = StreamlitValidator()
+    errors = validator._validate_app(app_code, notebook_path)
+
+    has_validation_error = len(errors) > 0
+    stringified_errors = [str(error) for error in errors]
+    return has_validation_error, stringified_errors
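A minimal sketch of calling the new `validate_app` helper directly, assuming streamlit is installed; "notebook.ipynb" is a placeholder path used only to resolve the working directory for relative reads:

```python
# Hypothetical check of a trivial app string against the new validator.
from mito_ai.streamlit_conversion.validate_streamlit_app import validate_app

app_code = "import streamlit as st\nst.title('Hello')\n"
has_errors, errors = validate_app(app_code, "notebook.ipynb")
print(has_errors, errors)  # Expect (False, []) for a trivially valid app
```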
mito_ai/streamlit_preview/handlers.py

@@ -70,8 +70,8 @@ class StreamlitPreviewHandler(APIHandler):
 
         # If still not found, return the original path (will cause a clear error)
         # This ensures we get a meaningful error message rather than a generic "file not found"
-        return notebook_path
-
+        return os.path.join(os.getcwd(), notebook_path)
+
     @tornado.web.authenticated
     async def post(self) -> None:
         """Start a new streamlit preview.
@@ -97,7 +97,7 @@ class StreamlitPreviewHandler(APIHandler):
             return
 
         notebook_path = body.get('notebook_path')
-
+
         if not notebook_path:
             self.set_status(400)
             self.finish({"error": 'Missing notebook_path parameter'})
@@ -110,6 +110,7 @@ class StreamlitPreviewHandler(APIHandler):
             preview_id = str(uuid.uuid4())
 
             # Generate streamlit code using existing handler
+            print('notebook_path', notebook_path)
             success, app_path, message = await streamlit_handler(resolved_notebook_path)
 
             if not success or app_path is None:
@@ -136,7 +137,9 @@ class StreamlitPreviewHandler(APIHandler):
         except Exception as e:
             print(f"Error in streamlit preview handler: {e}")
             self.set_status(500)
-            self.finish({"error": f'Internal server error: {str(e)}'})
+
+            # Respond with the error
+            self.finish({"error": str(e)})
 
     @tornado.web.authenticated
     def delete(self, preview_id: str) -> None: