mito-ai 0.1.35__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mito-ai might be problematic.

Files changed (54)
  1. mito_ai/__init__.py +6 -4
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +3 -10
  4. mito_ai/app_builder/handlers.py +89 -11
  5. mito_ai/app_builder/models.py +3 -0
  6. mito_ai/auth/README.md +18 -0
  7. mito_ai/auth/__init__.py +6 -0
  8. mito_ai/auth/handlers.py +96 -0
  9. mito_ai/auth/urls.py +13 -0
  10. mito_ai/completions/completion_handlers/chat_completion_handler.py +2 -2
  11. mito_ai/completions/models.py +7 -6
  12. mito_ai/completions/prompt_builders/agent_execution_prompt.py +8 -3
  13. mito_ai/completions/prompt_builders/agent_system_message.py +21 -7
  14. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  15. mito_ai/completions/prompt_builders/utils.py +53 -10
  16. mito_ai/constants.py +11 -1
  17. mito_ai/streamlit_conversion/streamlit_agent_handler.py +112 -0
  18. mito_ai/streamlit_conversion/streamlit_system_prompt.py +42 -0
  19. mito_ai/streamlit_conversion/streamlit_utils.py +96 -0
  20. mito_ai/streamlit_conversion/validate_and_run_streamlit_code.py +207 -0
  21. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  22. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  23. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +265 -0
  24. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +197 -0
  25. mito_ai/tests/streamlit_conversion/test_validate_and_run_streamlit_code.py +418 -0
  26. mito_ai/tests/test_constants.py +18 -3
  27. mito_ai/utils/anthropic_utils.py +18 -70
  28. mito_ai/utils/gemini_utils.py +22 -73
  29. mito_ai/utils/mito_server_utils.py +147 -4
  30. mito_ai/utils/open_ai_utils.py +18 -107
  31. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +100 -100
  32. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  33. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  34. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.831f63b48760c7119b9b.js +1165 -539
  35. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.831f63b48760c7119b9b.js.map +1 -0
  36. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.93ecc9bc0edba61535cc.js +18 -14
  37. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.93ecc9bc0edba61535cc.js.map +1 -0
  38. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js → mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +6 -2
  39. mito_ai-0.1.37.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  40. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/METADATA +1 -1
  41. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/RECORD +51 -38
  42. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a20772bc113422d0f505.js.map +0 -1
  43. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.d2eea6519fa332d79efb.js.map +0 -1
  44. mito_ai-0.1.35.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.76efcc5c3be4056457ee.js.map +0 -1
  45. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  46. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  47. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  48. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js +0 -0
  49. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -0
  50. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  51. {mito_ai-0.1.35.data → mito_ai-0.1.37.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  52. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/WHEEL +0 -0
  53. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/entry_points.txt +0 -0
  54. {mito_ai-0.1.35.dist-info → mito_ai-0.1.37.dist-info}/licenses/LICENSE +0 -0
mito_ai/completions/prompt_builders/utils.py CHANGED
@@ -1,22 +1,65 @@
  # Copyright (c) Saga Inc.
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.

- from typing import List, Optional
+ from typing import List, Optional, Dict
  from mito_ai.rules.utils import get_rule

- def get_rules_str(selected_rules: Optional[List[str]]) -> str:
+
+ def get_rules_str(additional_context: Optional[List[Dict[str, str]]]) -> str:
      """
-     Get a string of the rules that the user has selected.
+     Extract the rules from the additional context array, and retrieve the rule content.
      """
-     if selected_rules is None:
-         return ''
-
-     rules_str = ''
+     if not additional_context:
+         return ""
+
+     selected_rules = [context["value"] for context in additional_context if context.get("type") == "rule"]
+     if len(selected_rules) == 0:
+         return ""
+
+     rules_str = ""
      for rule in selected_rules:
          rule_content = get_rule(rule)
-         if rule_content is None or rule_content == '':
+         if rule_content is None or rule_content == "":
              continue
-
+
          rules_str += f"===========\n\nCustom Instructions Provided by User: {rule}\n\n{rule_content}\n\n==========="
-
+
      return rules_str
+
+
+ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]]) -> str:
+     """
+     Get the selected context from the additional context array.
+     """
+     if not additional_context:
+         return ""
+
+     # STEP 1: Extract each context type into a separate list
+     selected_variables = [context["value"] for context in additional_context if context.get("type") == "variable"]
+     selected_files = [context["value"] for context in additional_context if context.get("type") == "file"]
+     selected_db_connections = [context["value"] for context in additional_context if context.get("type") == "db"]
+
+     # STEP 2: Create a list of strings (instructions) for each context type
+     context_parts = []
+
+     if len(selected_variables) > 0:
+         context_parts.append(
+             "The following variables have been selected by the user to be used in the task:\n"
+             + "\n".join(selected_variables)
+         )
+
+     if len(selected_files) > 0:
+         context_parts.append(
+             "The following files have been selected by the user to be used in the task:\n"
+             + "\n".join(selected_files)
+         )
+
+     if len(selected_db_connections) > 0:
+         context_parts.append(
+             "The following database connections have been selected by the user to be used in the task:\n"
+             + "\n".join(selected_db_connections)
+         )
+
+     # STEP 3: Combine into a single string
+
+     return "\n\n".join(context_parts)
mito_ai/constants.py CHANGED
@@ -46,4 +46,14 @@ MITO_STREAMLIT_TEST_BASE_URL = "https://iyual08t6d.execute-api.us-east-1.amazona

  # Set ACTIVE_BASE_URL manually
  # TODO: Modify to PROD url before release
- ACTIVE_STREAMLIT_BASE_URL = MITO_STREAMLIT_TEST_BASE_URL # Change to MITO_STREAMLIT_DEV_BASE_URL for dev
+ ACTIVE_STREAMLIT_BASE_URL = MITO_STREAMLIT_DEV_BASE_URL # Change to MITO_STREAMLIT_DEV_BASE_URL for dev
+
+ # AWS Cognito configuration
+ COGNITO_CONFIG_DEV = {
+     'TOKEN_ENDPOINT': 'https://mito-app-auth.auth.us-east-1.amazoncognito.com/oauth2/token',
+     'CLIENT_ID': '6ara3u3l8sss738hrhbq1qtiqf',
+     'CLIENT_SECRET': '',
+     'REDIRECT_URI': 'http://localhost:8888/lab'
+ }
+
+ ACTIVE_COGNITO_CONFIG = COGNITO_CONFIG_DEV # Change to COGNITO_CONFIG_DEV for dev
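The new mito_ai/auth handlers (not shown in this hunk) presumably consume ACTIVE_COGNITO_CONFIG. As a hedged sketch only, a standard OAuth2 authorization-code exchange against the configured Cognito token endpoint could look like the following; the `exchange_code_for_tokens` helper is hypothetical and not the package's actual handler code:

```python
import requests

from mito_ai.constants import ACTIVE_COGNITO_CONFIG

def exchange_code_for_tokens(auth_code: str) -> dict:
    """Hypothetical helper: swap a Cognito authorization code for tokens."""
    response = requests.post(
        ACTIVE_COGNITO_CONFIG['TOKEN_ENDPOINT'],
        data={
            "grant_type": "authorization_code",
            "client_id": ACTIVE_COGNITO_CONFIG['CLIENT_ID'],
            "code": auth_code,
            "redirect_uri": ACTIVE_COGNITO_CONFIG['REDIRECT_URI'],
        },
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        timeout=10,
    )
    response.raise_for_status()
    # A successful exchange returns id_token, access_token, and refresh_token.
    return response.json()
```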
mito_ai/streamlit_conversion/streamlit_agent_handler.py ADDED
@@ -0,0 +1,112 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import logging
+ from anthropic.types import MessageParam
+ from typing import List, Tuple, cast
+
+ from mito_ai.logger import get_logger
+ from mito_ai.streamlit_conversion.streamlit_system_prompt import streamlit_system_prompt
+ from mito_ai.streamlit_conversion.validate_and_run_streamlit_code import streamlit_code_validator
+ from mito_ai.streamlit_conversion.streamlit_utils import extract_code_blocks, create_app_file, parse_jupyter_notebook_to_extract_required_content
+ from mito_ai.utils.anthropic_utils import stream_anthropic_completion_from_mito_server
+ from mito_ai.completions.models import MessageType
+
+ STREAMLIT_AI_MODEL = "claude-3-5-haiku-latest"
+
+ class StreamlitCodeGeneration:
+     def __init__(self, notebook: dict) -> None:
+
+         self.messages: List[MessageParam] = [
+             cast(MessageParam, {
+                 "role": "user",
+                 "content": [{
+                     "type": "text",
+                     "text": f"Here is my jupyter notebook content that I want to convert into a Streamlit dashboard - {notebook}"
+                 }]
+             })
+         ]
+
+     @property
+     def log(self) -> logging.Logger:
+         """Use Mito AI logger."""
+         return get_logger()
+
+     async def get_response_from_agent(self, message_to_agent: List[MessageParam]) -> str:
+         """Gets the streaming response from the agent using the mito server"""
+         model = STREAMLIT_AI_MODEL
+         max_tokens = 8192 # 64_000
+         temperature = 0.2
+
+         self.log.info("Getting response from agent...")
+         accumulated_response = ""
+         async for stream_chunk in stream_anthropic_completion_from_mito_server(
+             model = model,
+             max_tokens = max_tokens,
+             temperature = temperature,
+             system = streamlit_system_prompt,
+             messages = message_to_agent,
+             stream=True,
+             message_type=MessageType.STREAMLIT_CONVERSION,
+             reply_fn=None,
+             message_id=""
+         ):
+             accumulated_response += stream_chunk
+         return accumulated_response
+
+     def add_agent_response_to_context(self, agent_response: str) -> None:
+         """Add the agent's response to the history"""
+         self.messages.append(
+             cast(MessageParam, {
+                 "role": "assistant",
+                 "content": [{
+                     "type": "text",
+                     "text": agent_response
+                 }]
+             })
+         )
+
+     async def generate_streamlit_code(self) -> str:
+         """Send a query to the agent, get its response and parse the code"""
+         agent_response = await self.get_response_from_agent(self.messages)
+
+         converted_code = extract_code_blocks(agent_response)
+         self.add_agent_response_to_context(converted_code)
+         return converted_code
+
+
+     async def correct_error_in_generation(self, error: str) -> str:
+         """If errors are present, send it back to the agent to get corrections in code"""
+         self.messages.append(
+             cast(MessageParam, {
+                 "role": "user",
+                 "content": [{
+                     "type": "text",
+                     "text": f"When I run the streamlit app code, I get the following error: {error}\nPlease return the FULL Streamlit app code with the error corrected"
+                 }]
+             })
+         )
+         agent_response = await self.get_response_from_agent(self.messages)
+         converted_code = extract_code_blocks(agent_response)
+         self.add_agent_response_to_context(converted_code)
+
+         return converted_code
+
+
+ async def streamlit_handler(notebook_path: str, app_path: str) -> Tuple[bool, str]:
+     """Handler function for streamlit code generation and validation"""
+     notebook_code = parse_jupyter_notebook_to_extract_required_content(notebook_path)
+     streamlit_code_generator = StreamlitCodeGeneration(notebook_code)
+     streamlit_code = await streamlit_code_generator.generate_streamlit_code()
+     has_validation_error, error = streamlit_code_validator(streamlit_code)
+     tries = 0
+     while has_validation_error and tries < 5:
+         streamlit_code = await streamlit_code_generator.correct_error_in_generation(error)
+         has_validation_error, error = streamlit_code_validator(streamlit_code)
+         tries+=1
+
+     if has_validation_error:
+         return False, "Error generating streamlit code by agent"
+
+     success_flag, message = create_app_file(app_path, streamlit_code)
+     return success_flag, message
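A minimal usage sketch for the new handler, assuming it is awaited from an event loop (or wrapped with asyncio.run); the notebook and output paths below are hypothetical:

```python
import asyncio

from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler

async def main() -> None:
    # Converts the notebook into <app_path>/app.py, retrying up to five times on validation errors.
    success, message = await streamlit_handler(
        notebook_path="analysis.ipynb",  # hypothetical input notebook
        app_path="streamlit_app",        # directory where app.py will be written
    )
    print(success, message)

asyncio.run(main())
```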
mito_ai/streamlit_conversion/streamlit_system_prompt.py ADDED
@@ -0,0 +1,42 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ streamlit_system_prompt = """You are a senior data scientist and Streamlit expert specializing in converting Jupyter notebooks into professional dashboard applications.
+
+ ROLE AND EXPERTISE:
+ - Expert in Python, Jupyter notebooks, Streamlit, and data visualization
+ - Experienced in creating executive-ready dashboards for business stakeholders
+ - Skilled in translating technical analysis into clear, interactive presentations
+
+ TASK REQUIREMENTS:
+ 1. Convert Jupyter notebook content into a complete Streamlit application (app.py)
+ 2. Preserve ALL outputs from code cells and markdown cells as they appear in the notebook
+ 3. Maintain the logical flow and structure of the original analysis
+ 4. Create an executive-friendly dashboard suitable for company leadership
+
+ STREAMLIT IMPLEMENTATION GUIDELINES:
+ - Use appropriate Streamlit components (st.title, st.header, st.subheader, st.markdown, etc.)
+ - Display all visualizations using st.pyplot(), st.plotly_chart(), or st.altair_chart() as appropriate
+ - Show dataframes and tables using st.dataframe() or st.table()
+ - Include all text explanations and insights from markdown cells
+ - Add interactive elements where beneficial (filters, selectors, etc.)
+ - Ensure professional styling and layout suitable for executives
+
+ CODE STRUCTURE:
+ - Generate a complete, runnable app.py file
+ - Include all necessary imports
+ - Handle data loading and processing
+ - Organize content with clear sections and headers
+ - Include comments explaining key sections
+
+ OUTPUT FORMAT:
+ - Provide the complete app.py file code
+ - Ensure all notebook outputs are faithfully reproduced
+ - Make the dashboard professional and presentation-ready
+ - Focus on clarity and executive-level communication
+ - Don't give extra explanations, just give the python code
+ - Do NOT add emojis
+ - Do NOT modify the graphs or analysis
+ - Do NOT provide your own interpretations for the analysis
+
+ Remember: The goal is to transform technical analysis into a polished, interactive/visually appealing dashboard that executives can easily understand and navigate."""
mito_ai/streamlit_conversion/streamlit_utils.py ADDED
@@ -0,0 +1,96 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import re
+ import json
+ from typing import Dict, Tuple, Any
+
+ def extract_code_blocks(message_content: str) -> str:
+     """
+     Extract all code blocks from Claude's response.
+
+     Args:
+         message_content (str): The actual content from the agent's response
+
+     Returns:
+         str: Removes the ```python``` part to be able to parse the code
+     """
+     if "```python" not in message_content:
+         return message_content
+
+     # return message_content.split('```python\n')[1].split('\n```')[0]
+     # Use regex to find all Python code blocks
+     pattern = r'```python\n(.*?)```'
+     matches = re.findall(pattern, message_content, re.DOTALL)
+
+     # Concatenate with single newlines
+     return '\n'.join(matches)
+
+
+ def create_app_file(file_path: str, code: str) -> Tuple[bool, str]:
+     """
+     Create app.py file and write code to it with error handling
+
+     Args:
+         file_path (str): The actual content from the agent's response
+         code (str): The actual content from the agent's response
+
+     Returns:
+         str: Removes the ```python``` part to be able to parse the code
+
+     """
+     try:
+         with open(file_path+"/app.py", 'w') as f:
+             f.write(code)
+         return True, f"Successfully created {file_path}"
+     except IOError as e:
+         return False, f"Error creating file: {str(e)}"
+     except Exception as e:
+         return False, f"Unexpected error: {str(e)}"
+
+
+ def parse_jupyter_notebook_to_extract_required_content(notebook_path: str) -> Dict[str, Any]:
+     """
+     Read a Jupyter notebook and filter cells to keep only cell_type and source fields.
+
+     Args:
+         notebook_path (str): Absolute path to the .ipynb file
+
+     Returns:
+         dict: Filtered notebook dictionary with only cell_type and source in cells
+
+     Raises:
+         FileNotFoundError: If the notebook file doesn't exist
+         json.JSONDecodeError: If the file is not valid JSON
+         KeyError: If the notebook doesn't have the expected structure
+     """
+     try:
+         # Read the notebook file
+         with open(notebook_path, 'r', encoding='utf-8') as f:
+             notebook_data: Dict[str, Any] = json.load(f)
+
+         # Check if 'cells' key exists
+         if 'cells' not in notebook_data:
+             raise KeyError("Notebook does not contain 'cells' key")
+
+         # Filter each cell to keep only cell_type and source
+         filtered_cells = []
+         for cell in notebook_data['cells']:
+             filtered_cell = {
+                 'cell_type': cell.get('cell_type', ''),
+                 'source': cell.get('source', [])
+             }
+             filtered_cells.append(filtered_cell)
+
+         # Update the notebook data with filtered cells
+         notebook_data['cells'] = filtered_cells
+
+         return notebook_data
+
+     except FileNotFoundError:
+         raise FileNotFoundError(f"Notebook file not found: {notebook_path}")
+     except json.JSONDecodeError as e:
+         # JSONDecodeError requires msg, doc, pos
+         raise json.JSONDecodeError(f"Invalid JSON in notebook file: {str(e)}", e.doc if hasattr(e, 'doc') else '', e.pos if hasattr(e, 'pos') else 0)
+     except Exception as e:
+         raise Exception(f"Error processing notebook: {str(e)}")
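As an illustration, a small hedged sketch of how `extract_code_blocks` behaves on a typical agent reply; the sample reply string is hypothetical, and the fence markers are built programmatically only to keep this snippet readable:

```python
from mito_ai.streamlit_conversion.streamlit_utils import extract_code_blocks

FENCE = "`" * 3  # "```", assembled here to avoid literal fences inside this example

# Hypothetical agent reply containing two fenced Python blocks.
reply = (
    "Here is the app:\n"
    f"{FENCE}python\nimport streamlit as st\n{FENCE}\n"
    "And the rest:\n"
    f"{FENCE}python\nst.title('Sales dashboard')\n{FENCE}"
)

# The regex keeps only the fenced block bodies and joins them with newlines.
code = extract_code_blocks(reply)
print(code)
```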
mito_ai/streamlit_conversion/validate_and_run_streamlit_code.py ADDED
@@ -0,0 +1,207 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import subprocess
+ import sys
+ import os
+ import time
+ import requests
+ import tempfile
+ import shutil
+ import traceback
+ import ast
+ import importlib.util
+ import warnings
+ from typing import Tuple, Optional, Dict, Any
+ from subprocess import Popen
+
+ # warnings.filterwarnings("ignore", message=r".*missing ScriptRunContext.*")
+ # warnings.filterwarnings("ignore", category=UserWarning)
+
+ warnings.filterwarnings("ignore", message=".*bare mode.*")
+
+
+ class StreamlitValidator:
+     def __init__(self, port: int = 8501, timeout: int = 30) -> None:
+         self.port = port
+         self.timeout = timeout
+         self.process: Optional[Popen[str]] = None
+         self.temp_dir: Optional[str] = None
+
+     def validate_syntax(self, app_code: str) -> Tuple[bool, str]:
+         """Check if the Python code has valid syntax"""
+         try:
+             ast.parse(app_code)
+             return True, "Syntax is valid"
+         except SyntaxError as e:
+             error_msg = ''.join(traceback.format_exception(type(e), e, e.__traceback__))
+             return False, f"Syntax error: {error_msg}"
+
+     def create_temp_app(self, app_code: str) -> str:
+         """Create a temporary Streamlit app file"""
+         self.temp_dir = tempfile.mkdtemp()
+         if self.temp_dir is None:
+             raise RuntimeError("Failed to create temporary directory")
+         app_path = os.path.join(self.temp_dir, "app.py")
+
+         with open(app_path, 'w') as f:
+             f.write(app_code)
+
+         return app_path
+
+     def start_streamlit_app(self, app_path: str) -> Tuple[bool, str]:
+         """Start the Streamlit app in a subprocess"""
+         try:
+             cmd = [
+                 sys.executable, "-m", "streamlit", "run", app_path,
+                 "--server.port", str(self.port),
+                 "--server.headless", "true",
+                 "--server.address", "localhost",
+                 "--logger.level", "error"
+             ]
+
+             self.process = subprocess.Popen(
+                 cmd,
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.PIPE,
+                 text=True
+             )
+
+             return True, "Streamlit app started"
+         except Exception as e:
+             return False, f"Failed to start Streamlit: {str(e)}"
+
+     def wait_for_app(self) -> Tuple[bool, str]:
+         """Wait for the Streamlit app to be ready"""
+         start_time = time.time()
+
+         exception_error = "Error"
+         while time.time() - start_time < self.timeout:
+             try:
+                 response = requests.get(f"http://localhost:{self.port}", timeout=5)
+                 if response.status_code == 200:
+                     return True, "App is running successfully"
+             except requests.exceptions.RequestException as e:
+                 exception_error = str(e)
+
+             time.sleep(1)
+
+         return False, f"App failed to start within timeout - {exception_error}"
+
+     def filter_streamlit_warnings(self, text: str) -> str:
+         """Filter out known Streamlit warnings that can be safely ignored"""
+         if not text:
+             return text
+
+         filtered_lines = []
+         for line in text.split('\n'):
+             # Skip lines containing ScriptRunContext warnings
+             if any(phrase in line for phrase in [
+                 'missing ScriptRunContext',
+                 'bare mode',
+                 'ScriptRunContext!',
+                 'Thread \'MainThread\':'
+             ]):
+                 continue
+             filtered_lines.append(line)
+
+         return '\n'.join(filtered_lines)
+
+     def check_for_errors(self) -> Tuple[bool, str]:
+         """Check if the Streamlit process has any errors"""
+         if self.process:
+             # Check if process is still running
+             if self.process.poll() is not None:
+                 stdout, stderr = self.process.communicate()
+                 # Filter out known warnings
+                 filtered_stderr = self.filter_streamlit_warnings(stderr)
+                 if filtered_stderr.strip():
+                     return False, f"App crashed: {filtered_stderr}"
+
+             return True, "App is running without errors"
+
+         return False, "No process found"
+
+     def cleanup(self) -> None:
+         """Clean up the temporary files and stop the process"""
+         if self.process:
+             self.process.terminate()
+             self.process.wait()
+             self.process = None
+
+         if self.temp_dir and os.path.exists(self.temp_dir):
+             shutil.rmtree(self.temp_dir)
+             self.temp_dir = None
+
+     def validate_app(self, app_code: str) -> Dict[str, Any]:
+         """Complete validation pipeline"""
+         results: Dict[str, Any] = {
+             'syntax_valid': False,
+             'app_starts': False,
+             'app_responsive': False,
+             'errors': []
+         }
+
+         try:
+             # Step 1: Check syntax
+             syntax_valid, syntax_msg = self.validate_syntax(app_code)
+             results['syntax_valid'] = syntax_valid
+             if not syntax_valid:
+                 results['errors'].append(syntax_msg)
+                 return results
+
+             # Step 2: Create and start app
+             app_path = self.create_temp_app(app_code)
+             app_started, start_msg = self.start_streamlit_app(app_path)
+             results['app_starts'] = app_started
+
+             if not app_started:
+                 results['errors'].append(start_msg)
+                 return results
+
+             # Step 3: Wait for app to be ready
+             app_ready, ready_msg = self.wait_for_app()
+             results['app_responsive'] = app_ready
+
+             if not app_ready:
+                 results['errors'].append(ready_msg)
+
+             # Step 4: Check for runtime errors
+             no_errors, error_msg = self.check_for_errors()
+             if not no_errors:
+                 results['errors'].append(error_msg)
+
+         except Exception as e:
+             results['errors'].append(f"Validation error: {str(e)}")
+
+         finally:
+             self.cleanup()
+
+         return results
+
+
+ def streamlit_code_validator(app_code: str) -> Tuple[bool, str]:
+     """Convenience function to validate Streamlit code"""
+     has_validation_error: bool = False
+     error_message: str = ""
+
+
+     validator = StreamlitValidator()
+     results = validator.validate_app(app_code)
+
+     print("Validation Results:")
+     print(f"✓ Syntax valid: {results['syntax_valid']}")
+     print(f"✓ App starts: {results['app_starts']}")
+     print(f"✓ App responsive: {results['app_responsive']}")
+
+     if results['errors']:
+         error_message = "Errors found: "
+         print("Error detected in agent code")
+         has_validation_error = True
+         print("\nErrors found:")
+         for error in results['errors']:
+             print(f"  - {error}")
+             error_message += error + "\n"
+     if not has_validation_error:
+         print("\nAll validations passed!")
+     return has_validation_error, error_message
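A hedged sketch of calling the validator on its own, assuming Streamlit is installed and the default port 8501 is free locally; the sample app code is hypothetical:

```python
from mito_ai.streamlit_conversion.validate_and_run_streamlit_code import streamlit_code_validator

# Hypothetical generated app code to smoke-test.
app_code = "import streamlit as st\nst.title('Sales dashboard')\n"

# Returns (has_validation_error, error_message); the app is syntax-checked,
# started headless in a temp directory, polled over HTTP, then cleaned up.
has_error, message = streamlit_code_validator(app_code)
if has_error:
    print(message)
```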
mito_ai/tests/providers/test_stream_mito_server_utils.py ADDED
@@ -0,0 +1,140 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import asyncio
+ import pytest
+ from unittest.mock import AsyncMock, MagicMock, patch, call
+
+ from mito_ai.utils.mito_server_utils import stream_response_from_mito_server
+ from mito_ai.completions.models import MessageType
+
+
+ # Mock classes for CompletionStreamChunk, CompletionItem, etc.
+ class CompletionItem:
+     def __init__(self, content: str, isIncomplete: bool, token: str):
+         self.content = content
+         self.isIncomplete = isIncomplete
+         self.token = token
+
+     def __eq__(self, other):
+         return (
+             self.content == other.content
+             and self.isIncomplete == other.isIncomplete
+             and self.token == other.token
+         )
+
+
+ class CompletionStreamChunk:
+     def __init__(self, parent_id: str, chunk: CompletionItem, done: bool):
+         self.parent_id = parent_id
+         self.chunk = chunk
+         self.done = done
+
+     def __eq__(self, other):
+         return (
+             self.parent_id == other.parent_id
+             and self.chunk == other.chunk
+             and self.done == other.done
+         )
+
+
+ @pytest.mark.asyncio
+ async def test_stream_response_happy_path(monkeypatch):
+     # Arrange
+     url = "https://fake.mito.server/stream"
+     headers = {"Authorization": "Bearer token"}
+     data = {"prompt": "hello world"}
+     timeout = 10
+     max_retries = 2
+     message_type = MessageType.CHAT
+     message_id = "msg-123"
+
+     # Fake chunks
+     raw_chunks = [b"chunk1", b"chunk2"]
+
+     # Mock reply_fn
+     reply_fn = MagicMock()
+
+     # Mock quota check/update
+     monkeypatch.setattr(
+         "mito_ai.utils.mito_server_utils.check_mito_server_quota", lambda *_: None
+     )
+     monkeypatch.setattr(
+         "mito_ai.utils.mito_server_utils.update_mito_server_quota", lambda *_: None
+     )
+
+     # Mock HTTPClient and fetch
+     chunk_callback = MagicMock()
+
+     class FakeHTTPClient:
+         def fetch(self, *args, **kwargs):
+             nonlocal chunk_callback
+             chunk_callback = kwargs["streaming_callback"]
+
+             async def fetch_simulation():
+                 # Simulate streaming data
+                 for chunk in raw_chunks:
+                     await asyncio.sleep(0.01)
+                     chunk_callback(chunk)
+                 return MagicMock()
+
+             return fetch_simulation()
+
+         def close(self):
+             pass
+
+     def mock_create_http_client(timeout_val, retry_val):
+         return FakeHTTPClient(), timeout_val
+
+     monkeypatch.setattr(
+         "mito_ai.utils.mito_server_utils._create_http_client", mock_create_http_client
+     )
+
+     # Act
+     gen = stream_response_from_mito_server(
+         url=url,
+         headers=headers,
+         data=data,
+         timeout=timeout,
+         max_retries=max_retries,
+         message_type=message_type,
+         reply_fn=reply_fn,
+         message_id=message_id,
+     )
+
+     results = []
+     async for chunk in gen:
+         results.append(chunk)
+
+     # Assert
+     assert results == [b"chunk1".decode(), b"chunk2".decode()]
+
+     # Check reply_fn calls
+     expected_calls = [
+         call(
+             CompletionStreamChunk(
+                 parent_id=message_id,
+                 chunk=CompletionItem(
+                     content="chunk1", isIncomplete=True, token=message_id
+                 ),
+                 done=False,
+             )
+         ),
+         call(
+             CompletionStreamChunk(
+                 parent_id=message_id,
+                 chunk=CompletionItem(
+                     content="chunk2", isIncomplete=True, token=message_id
+                 ),
+                 done=False,
+             )
+         ),
+         call(
+             CompletionStreamChunk(
+                 parent_id=message_id,
+                 chunk=CompletionItem(content="", isIncomplete=False, token=message_id),
+                 done=True,
+             )
+         ),
+     ]
+     reply_fn.assert_has_calls(expected_calls)
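One way to run just this test from Python, assuming pytest and the pytest-asyncio plugin (needed for the @pytest.mark.asyncio marker) are installed in the environment:

```python
import pytest

# Runs only the happy-path streaming test from this module.
pytest.main([
    "-q",
    "mito_ai/tests/providers/test_stream_mito_server_utils.py",
    "-k", "test_stream_response_happy_path",
])
```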