mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. mito_ai/__init__.py +49 -9
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +142 -67
  4. mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
  5. mito_ai/app_deploy/app_deploy_utils.py +44 -0
  6. mito_ai/app_deploy/handlers.py +345 -0
  7. mito_ai/{app_builder → app_deploy}/models.py +35 -22
  8. mito_ai/app_manager/__init__.py +4 -0
  9. mito_ai/app_manager/handlers.py +167 -0
  10. mito_ai/app_manager/models.py +71 -0
  11. mito_ai/app_manager/utils.py +24 -0
  12. mito_ai/auth/README.md +18 -0
  13. mito_ai/auth/__init__.py +6 -0
  14. mito_ai/auth/handlers.py +96 -0
  15. mito_ai/auth/urls.py +13 -0
  16. mito_ai/chat_history/handlers.py +63 -0
  17. mito_ai/chat_history/urls.py +32 -0
  18. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  19. mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
  20. mito_ai/completions/completion_handlers/utils.py +99 -37
  21. mito_ai/completions/handlers.py +57 -20
  22. mito_ai/completions/message_history.py +9 -1
  23. mito_ai/completions/models.py +31 -7
  24. mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
  25. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  26. mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
  27. mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
  28. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  29. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  30. mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
  31. mito_ai/completions/prompt_builders/utils.py +72 -10
  32. mito_ai/completions/providers.py +81 -47
  33. mito_ai/constants.py +25 -24
  34. mito_ai/file_uploads/__init__.py +3 -0
  35. mito_ai/file_uploads/handlers.py +248 -0
  36. mito_ai/file_uploads/urls.py +21 -0
  37. mito_ai/gemini_client.py +44 -48
  38. mito_ai/log/handlers.py +10 -3
  39. mito_ai/log/urls.py +3 -3
  40. mito_ai/openai_client.py +30 -44
  41. mito_ai/path_utils.py +70 -0
  42. mito_ai/streamlit_conversion/agent_utils.py +37 -0
  43. mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
  44. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  45. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
  46. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  47. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
  48. mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
  49. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  50. mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
  51. mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
  52. mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
  53. mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
  54. mito_ai/streamlit_preview/__init__.py +6 -0
  55. mito_ai/streamlit_preview/handlers.py +111 -0
  56. mito_ai/streamlit_preview/manager.py +152 -0
  57. mito_ai/streamlit_preview/urls.py +22 -0
  58. mito_ai/streamlit_preview/utils.py +29 -0
  59. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  60. mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
  61. mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
  62. mito_ai/tests/file_uploads/__init__.py +2 -0
  63. mito_ai/tests/file_uploads/test_handlers.py +282 -0
  64. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  65. mito_ai/tests/message_history/test_message_history_utils.py +103 -23
  66. mito_ai/tests/open_ai_utils_test.py +18 -22
  67. mito_ai/tests/providers/test_anthropic_client.py +447 -0
  68. mito_ai/tests/providers/test_azure.py +2 -6
  69. mito_ai/tests/providers/test_capabilities.py +120 -0
  70. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  71. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  72. mito_ai/tests/providers/test_model_resolution.py +130 -0
  73. mito_ai/tests/providers/test_openai_client.py +57 -0
  74. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  75. mito_ai/tests/providers/test_provider_limits.py +42 -0
  76. mito_ai/tests/providers/test_providers.py +382 -0
  77. mito_ai/tests/providers/test_retry_logic.py +389 -0
  78. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  79. mito_ai/tests/providers/utils.py +85 -0
  80. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  81. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
  82. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
  83. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
  84. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
  85. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
  86. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
  87. mito_ai/tests/test_constants.py +31 -3
  88. mito_ai/tests/test_telemetry.py +12 -0
  89. mito_ai/tests/user/__init__.py +2 -0
  90. mito_ai/tests/user/test_user.py +120 -0
  91. mito_ai/tests/utils/test_anthropic_utils.py +6 -6
  92. mito_ai/user/handlers.py +45 -0
  93. mito_ai/user/urls.py +21 -0
  94. mito_ai/utils/anthropic_utils.py +55 -121
  95. mito_ai/utils/create.py +17 -1
  96. mito_ai/utils/error_classes.py +42 -0
  97. mito_ai/utils/gemini_utils.py +39 -94
  98. mito_ai/utils/message_history_utils.py +7 -4
  99. mito_ai/utils/mito_server_utils.py +242 -0
  100. mito_ai/utils/open_ai_utils.py +38 -155
  101. mito_ai/utils/provider_utils.py +49 -0
  102. mito_ai/utils/server_limits.py +1 -1
  103. mito_ai/utils/telemetry_utils.py +137 -5
  104. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  105. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  106. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  107. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
  108. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
  109. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
  110. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  111. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  112. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
  113. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
  114. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
  115. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  116. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  117. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  118. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  119. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  120. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  121. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  122. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  123. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  124. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  125. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  126. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  127. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  128. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
  129. mito_ai-0.1.49.dist-info/RECORD +205 -0
  130. mito_ai/app_builder/handlers.py +0 -218
  131. mito_ai/tests/providers_test.py +0 -438
  132. mito_ai/tests/test_anthropic_client.py +0 -270
  133. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
  134. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
  135. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  136. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  137. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  138. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  139. mito_ai-0.1.33.dist-info/RECORD +0 -134
  140. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  141. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  142. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  143. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  144. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
  145. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
  146. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,112 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import os
+ import tempfile
+ from unittest.mock import patch, MagicMock
+ from mito_ai.streamlit_conversion.validate_streamlit_app import (
+     get_syntax_error,
+     get_runtime_errors,
+     validate_app
+ )
+ import pytest
+ from mito_ai.path_utils import AbsoluteNotebookPath
+
+
+ class TestGetSyntaxError:
+     """Test cases for get_syntax_error function"""
+
+     @pytest.mark.parametrize("code,expected_error,test_description", [
+         # Valid Python code should return no error
+         (
+             "import streamlit\nst.title('Hello World')",
+             None,
+             "valid Python code"
+         ),
+         # Invalid Python syntax should be caught
+         (
+             "import streamlit\nst.title('Hello World'",
+             "SyntaxError",
+             "invalid Python code"
+         ),
+         # Empty streamlit app is valid
+         (
+             "",
+             None,
+             "empty code"
+         ),
+     ])
+     def test_get_syntax_error(self, code, expected_error, test_description):
+         """Test syntax validation with various code inputs"""
+         error = get_syntax_error(code)
+
+         if expected_error is None:
+             assert error is None, f"Expected no error for {test_description}"
+         else:
+             assert error is not None, f"Expected error for {test_description}"
+             assert expected_error in error, f"Expected '{expected_error}' in error for {test_description}"
+
+ class TestGetRuntimeErrors:
+     """Test cases for get_runtime_errors function"""
+
+     @pytest.mark.parametrize("app_code,expected_error", [
+         ("x = 5", None),
+         ("1/0", "division by zero"),
+         ("", None)
+     ])
+     def test_get_runtime_errors(self, app_code, expected_error):
+         """Test getting runtime errors"""
+
+         absolute_path = AbsoluteNotebookPath('/notebook.ipynb')
+         errors = get_runtime_errors(app_code, absolute_path)
+
+         if expected_error is None:
+             assert errors is None
+         else:
+             errors_str = str(errors)
+             assert expected_error in errors_str
+
+     def test_get_runtime_errors_with_relative_path(self):
+         """Test getting runtime errors"""
+
+         app_code ="""
+ import streamlit as st
+ import pandas as pd
+
+ df=pd.read_csv('data.csv')
+ """
+         # Create a temporary csv file in the directory temp/data.csv
+         with tempfile.TemporaryDirectory() as temp_dir:
+             directory = 'app_directory'
+             csv_path = os.path.join(temp_dir, directory, "data.csv")
+
+             os.makedirs(os.path.join(temp_dir, directory), exist_ok=True)
+             app_path = os.path.join(temp_dir, directory, "app.py")
+
+             # Create the file if it doesn't exist
+             with open(csv_path, "w") as f:
+                 f.write("name,age\nJohn,25\nJane,30")
+
+             errors = get_runtime_errors(app_code, AbsoluteNotebookPath(app_path))
+             assert errors is None
+
+ class TestValidateApp:
+     """Test cases for validate_app function"""
+
+     @pytest.mark.parametrize("app_code,expected_has_errors,expected_error_message", [
+         ("x=5", False, ""),
+         ("1/0", True, "division by zero"),
+         # Syntax errors are caught during runtime
+         ("", False, ""),
+     ])
+     def test_validate_app(self, app_code, expected_has_errors, expected_error_message):
+         """Test the validate_app function"""
+         errors = validate_app(app_code, AbsoluteNotebookPath('/notebook.ipynb'))
+
+         has_errors = len(errors) > 0
+         assert has_errors == expected_has_errors
+         if expected_error_message:
+             errors_str = str(errors)
+             assert expected_error_message in errors_str
+
+
@@ -0,0 +1,118 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ import os
+ import tempfile
+ from unittest.mock import patch, Mock, AsyncMock, MagicMock
+ from mito_ai.streamlit_preview.handlers import StreamlitPreviewHandler
+ from mito_ai.path_utils import AbsoluteNotebookPath
+
+
+ class TestStreamlitPreviewHandler:
+     """Test cases for the StreamlitPreviewHandler."""
+
+     @pytest.mark.asyncio
+     @pytest.mark.parametrize(
+         "app_exists,force_recreate,streamlit_handler_called",
+         [
+             # Test case 1: App exists, not forcing recreate - should not call streamlit_handler
+             (True, False, False),
+             # Test case 2: App doesn't exist - should call streamlit_handler
+             (False, False, True),
+             # Test case 3: App exists but forcing recreate - should call streamlit_handler
+             (True, True, True),
+         ],
+         ids=[
+             "app_exists_no_force_recreate",
+             "app_does_not_exist_generates_new_one",
+             "app_exists_force_recreate",
+         ]
+     )
+     async def test_post_handler_app_generation(
+         self,
+         app_exists,
+         force_recreate,
+         streamlit_handler_called,
+     ):
+         """Test StreamlitPreviewHandler POST method with various scenarios."""
+         with tempfile.TemporaryDirectory() as temp_dir:
+             notebook_path = os.path.join(temp_dir, "test_notebook.ipynb")
+             notebook_id = "test_notebook_id"
+             # App file name is derived from notebook_id
+             app_file_name = f"{notebook_id}.py"
+             app_path = os.path.join(temp_dir, app_file_name)
+
+             # Create notebook file
+             with open(notebook_path, "w") as f:
+                 f.write('{"cells": []}')
+
+             # Create app file if it should exist
+             if app_exists:
+                 with open(app_path, "w") as f:
+                     f.write("import streamlit as st\nst.write('Hello World')")
+
+             # Create a properly mocked Tornado application with required attributes
+             mock_application = MagicMock()
+             mock_application.ui_methods = {}
+             mock_application.ui_modules = {}
+             mock_application.settings = {}
+
+             # Create a mock request with necessary tornado setup
+             mock_request = MagicMock()
+             mock_request.connection = MagicMock()
+             mock_request.connection.context = MagicMock()
+
+             # Create handler instance
+             handler = StreamlitPreviewHandler(
+                 application=mock_application,
+                 request=mock_request,
+             )
+             handler.initialize()
+
+             # Mock authentication - set current_user to bypass @tornado.web.authenticated
+             handler.current_user = "test_user" # type: ignore
+
+             # Mock the finish method and other handler methods
+             finish_called = []
+             def mock_finish_func(response):
+                 finish_called.append(response)
+
+             # Mock streamlit_handler and preview manager
+             with patch.object(handler, 'get_json_body', return_value={
+                 "notebook_path": notebook_path,
+                 "notebook_id": notebook_id,
+                 "force_recreate": force_recreate,
+                 "edit_prompt": ""
+             }), \
+                  patch.object(handler, 'finish', side_effect=mock_finish_func), \
+                  patch.object(handler, 'set_status'), \
+                  patch('mito_ai.streamlit_preview.handlers.streamlit_handler', new_callable=AsyncMock) as mock_streamlit_handler, \
+                  patch.object(handler.preview_manager, 'start_streamlit_preview', return_value=8501) as mock_start_preview, \
+                  patch('mito_ai.streamlit_preview.handlers.log_streamlit_app_preview_success'):
+
+                 # Call the handler
+                 await handler.post() # type: ignore[misc]
+
+                 # Verify streamlit_handler was called or not called as expected
+                 if streamlit_handler_called:
+                     assert mock_streamlit_handler.called
+                     # Verify it was called with the correct arguments
+                     call_args = mock_streamlit_handler.call_args
+                     assert call_args[0][0] == os.path.abspath(notebook_path) # First argument should be the absolute notebook path
+                     assert call_args[0][1] == app_file_name # Second argument should be the app file name
+                     assert call_args[0][2] == "" # Third argument should be the edit_prompt
+                 else:
+                     mock_streamlit_handler.assert_not_called()
+
+                 # Verify preview was started
+                 mock_start_preview.assert_called_once()
+
+                 # Verify response was sent
+                 assert len(finish_called) == 1
+                 response = finish_called[0]
+                 assert response["type"] == "success"
+                 assert "port" in response
+                 assert "id" in response
+
+
@@ -0,0 +1,292 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ import time
+ import tempfile
+ import os
+ import shutil
+ import subprocess
+ import threading
+ import requests
+ import json
+ from unittest.mock import Mock, patch, MagicMock
+ from typing import Any
+
+ from mito_ai.streamlit_preview.manager import (
+     StreamlitPreviewManager,
+     PreviewProcess
+ )
+
+
+ class TestStreamlitPreviewManager:
+     """Test cases for StreamlitPreviewManager."""
+
+     @pytest.fixture
+     def manager(self):
+         """Create a fresh manager instance for each test."""
+         return StreamlitPreviewManager()
+
+     @pytest.fixture
+     def sample_app_code(self):
+         """Sample streamlit app code for testing."""
+         return """
+ import streamlit as st
+
+ st.title("Test App")
+ st.write("Hello, World!")
+ """
+
+     def test_init(self, manager):
+         """Test manager initialization."""
+         assert manager._previews == {}
+         assert isinstance(manager._lock, type(threading.Lock()))
+         assert manager.log is not None
+
+     def test_get_free_port(self, manager):
+         """Test getting a free port."""
+         port = manager.get_free_port()
+         assert isinstance(port, int)
+         assert port > 0
+         assert port < 65536
+
+         # Test that we get different ports
+         port2 = manager.get_free_port()
+         assert port != port2
+
+     @pytest.mark.parametrize("app_code,preview_id,expected_success", [
+         ("import streamlit as st\nst.write('Hello')", "test_preview", True),
+         ("", "empty_preview", True),
+         ("import streamlit as st\n" * 1000 + "st.write('Large app')", "large_preview", True),
+     ])
+     def test_start_streamlit_preview_success_cases(self, manager, app_code, preview_id, expected_success):
+         """Test successful streamlit preview start with different app codes."""
+         with patch('subprocess.Popen') as mock_popen, \
+              patch('requests.get') as mock_requests_get, \
+              patch('tempfile.mkdtemp') as mock_mkdtemp:
+
+             # Setup mocks
+             app_directory = "/tmp/test_dir"
+             mock_mkdtemp.return_value = app_directory
+             mock_proc = Mock()
+             mock_proc.terminate.return_value = None
+             mock_proc.wait.return_value = None
+             mock_popen.return_value = mock_proc
+
+             mock_response = Mock()
+             mock_response.status_code = 200
+             mock_requests_get.return_value = mock_response
+
+             # Test
+             port = manager.start_streamlit_preview(app_directory, 'test-file-name.py', preview_id)
+
+             # Assertions
+             assert isinstance(port, int)
+             assert port > 0
+
+             # Verify subprocess was called correctly
+             mock_popen.assert_called_once()
+             call_args = mock_popen.call_args
+             assert "streamlit" in call_args[0][0]
+             assert "run" in call_args[0][0]
+             assert "--server.headless" in call_args[0][0]
+             assert "--server.address" in call_args[0][0]
+
+             # Cleanup
+             manager.stop_preview(preview_id)
+
+     @pytest.mark.parametrize("exception_type,expected_message", [
+         (Exception("Temp dir creation failed"), "failed to start preview"),
+         (OSError("Permission denied"), "failed to start preview"),
+         (ValueError("Invalid argument"), "failed to start preview"),
+     ])
+     def test_start_streamlit_preview_exceptions(self, manager, sample_app_code, exception_type, expected_message):
+         """Test streamlit preview start with different exceptions."""
+         from mito_ai.utils.error_classes import StreamlitPreviewError
+
+         with patch('subprocess.Popen', side_effect=exception_type):
+             app_directory = "/tmp/test_dir"
+
+             with pytest.raises(StreamlitPreviewError) as exc_info:
+                 manager.start_streamlit_preview(app_directory, 'test-file-name.py', "test_preview")
+
+             assert expected_message in str(exc_info.value).lower()
+
+     @pytest.mark.parametrize("preview_id,expected_result", [
+         ("existing_preview", True),
+         ("non_existent", False),
+     ])
+     def test_stop_preview_scenarios(self, manager, sample_app_code, preview_id, expected_result):
+         """Test stopping previews with different scenarios."""
+         if expected_result:
+             # Start a preview first
+             with patch('subprocess.Popen') as mock_popen, \
+                  patch('requests.get') as mock_requests_get, \
+                  patch('tempfile.mkdtemp') as mock_mkdtemp, \
+                  patch('builtins.open', create=True) as mock_open, \
+                  patch('os.path.exists') as mock_exists:
+
+                 app_directory = "/tmp/test_dir"
+                 mock_mkdtemp.return_value = app_directory
+                 mock_proc = Mock()
+                 mock_proc.terminate.return_value = None
+                 mock_proc.wait.return_value = None
+                 mock_popen.return_value = mock_proc
+
+                 mock_response = Mock()
+                 mock_response.status_code = 200
+                 mock_requests_get.return_value = mock_response
+
+                 # Mock file operations
+                 mock_file = Mock()
+                 mock_open.return_value.__enter__.return_value = mock_file
+                 mock_exists.return_value = True
+
+                 manager.start_streamlit_preview(app_directory, 'test-file-name.py', preview_id)
+
+     @pytest.mark.parametrize("process_behavior,expected_kill_called", [
+         (subprocess.TimeoutExpired("cmd", 5), True),
+         (None, False), # Normal termination
+     ])
+     def test_stop_preview_process_behaviors(self, manager, sample_app_code, process_behavior, expected_kill_called):
+         """Test stopping preview with different process behaviors."""
+         with patch('subprocess.Popen') as mock_popen, \
+              patch('requests.get') as mock_requests_get, \
+              patch('tempfile.mkdtemp') as mock_mkdtemp, \
+              patch('builtins.open', create=True) as mock_open, \
+              patch('os.path.exists') as mock_exists:
+
+             # Setup mocks for start
+             app_directory = "/tmp/test_dir"
+             mock_mkdtemp.return_value = app_directory
+
+             mock_proc = Mock()
+             mock_proc.terminate.return_value = None
+             mock_proc.wait.return_value = None
+             mock_popen.return_value = mock_proc
+
+             mock_response = Mock()
+             mock_response.status_code = 200
+             mock_requests_get.return_value = mock_response
+
+             # Mock file operations
+             mock_file = Mock()
+             mock_open.return_value.__enter__.return_value = mock_file
+             mock_exists.return_value = True
+
+             # Start a preview
+             manager.start_streamlit_preview(app_directory, 'test-file-name.py', "test_preview")
+
+             # Setup process behavior for stop
+             if process_behavior:
+                 # Configure the mock to raise the exception when called with timeout
+                 def wait_with_timeout(*args, **kwargs):
+                     if 'timeout' in kwargs:
+                         raise process_behavior
+                     return None
+                 mock_proc.wait.side_effect = wait_with_timeout
+
+     @pytest.mark.parametrize("preview_id,expected_found", [
+         ("existing_preview", True),
+         ("non_existent", False),
+     ])
+     def test_get_preview_scenarios(self, manager, sample_app_code, preview_id, expected_found):
+         """Test getting previews with different scenarios."""
+         if expected_found:
+             # Start a preview first
+             with patch('subprocess.Popen') as mock_popen, \
+                  patch('requests.get') as mock_requests_get, \
+                  patch('tempfile.mkdtemp') as mock_mkdtemp, \
+                  patch('builtins.open', create=True) as mock_open, \
+                  patch('os.path.exists') as mock_exists:
+
+                 mock_mkdtemp.return_value = "/tmp/test_dir"
+                 mock_proc = Mock()
+                 mock_proc.terminate.return_value = None
+                 mock_proc.wait.return_value = None
+                 mock_popen.return_value = mock_proc
+
+                 mock_response = Mock()
+                 mock_response.status_code = 200
+                 mock_requests_get.return_value = mock_response
+
+                 # Mock file operations
+                 mock_file = Mock()
+                 mock_open.return_value.__enter__.return_value = mock_file
+                 mock_exists.return_value = True
+
+                 manager.start_streamlit_preview("/tmp/test_dir", 'test-file-name.py', preview_id)
+
+         preview = manager.get_preview(preview_id)
+
+         if expected_found:
+             assert preview is not None
+             assert isinstance(preview, PreviewProcess)
+             assert preview.port > 0
+
+             # Cleanup
+             manager.stop_preview(preview_id)
+         else:
+             assert preview is None
+
+     def test_preview_process_dataclass(self):
+         """Test PreviewProcess dataclass."""
+         proc = Mock()
+         port = 8080
+
+         preview = PreviewProcess(
+             proc=proc,
+             port=port
+         )
+
+         assert preview.proc == proc
+         assert preview.port == port
+
+     @pytest.mark.parametrize("num_previews", [1, 2, 3])
+     def test_concurrent_previews(self, manager, sample_app_code, num_previews):
+         """Test managing multiple concurrent previews."""
+         preview_ids = [f"preview_{i}" for i in range(num_previews)]
+         ports = []
+
+         with patch('subprocess.Popen') as mock_popen, \
+              patch('requests.get') as mock_requests_get, \
+              patch('tempfile.mkdtemp') as mock_mkdtemp, \
+              patch('builtins.open', create=True) as mock_open, \
+              patch('os.path.exists') as mock_exists:
+
+             # Setup mocks
+             mock_mkdtemp.return_value = "/tmp/test_dir"
+             mock_proc = Mock()
+             mock_proc.terminate.return_value = None
+             mock_proc.wait.return_value = None
+             mock_popen.return_value = mock_proc
+
+             mock_response = Mock()
+             mock_response.status_code = 200
+             mock_requests_get.return_value = mock_response
+
+             # Mock file operations
+             mock_file = Mock()
+             mock_open.return_value.__enter__.return_value = mock_file
+             mock_exists.return_value = True
+
+             # Start multiple previews
+             for preview_id in preview_ids:
+                 port = manager.start_streamlit_preview("/tmp/test_dir", 'test-file-name.py', preview_id)
+                 ports.append(port)
+
+             # Assertions
+             assert len(set(ports)) == num_previews # All ports should be different
+
+             # Check all previews exist
+             for preview_id in preview_ids:
+                 assert manager.get_preview(preview_id) is not None
+
+             # Stop all previews
+             for preview_id in preview_ids:
+                 assert manager.stop_preview(preview_id)
+
+             # Verify they're gone
+             for preview_id in preview_ids:
+                 assert manager.get_preview(preview_id) is None
+
@@ -3,17 +3,45 @@

  from typing import Any
  import pytest
- from mito_ai.constants import ACTIVE_BASE_URL, MITO_PROD_BASE_URL, MITO_DEV_BASE_URL
+ from mito_ai.constants import (
+     ACTIVE_BASE_URL, MITO_PROD_BASE_URL, MITO_DEV_BASE_URL,
+     MITO_STREAMLIT_DEV_BASE_URL, MITO_STREAMLIT_TEST_BASE_URL, ACTIVE_STREAMLIT_BASE_URL,
+     COGNITO_CONFIG_DEV, ACTIVE_COGNITO_CONFIG,
+ )


  def test_prod_lambda_url() -> Any:
      """Make sure that the lambda urls are correct"""
-     assert MITO_PROD_BASE_URL == "https://yxwyadgaznhavqvgnbfuo2k6ca0jboku.lambda-url.us-east-1.on.aws"
+     assert MITO_PROD_BASE_URL.startswith("https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/")

  def test_dev_lambda_url() -> Any:
      """Make sure that the lambda urls are correct"""
-     assert MITO_DEV_BASE_URL == "https://x3rafympznv4abp7phos44gzgu0clbui.lambda-url.us-east-1.on.aws"
+     assert MITO_DEV_BASE_URL.startswith("https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/")

  def test_active_base_url() -> Any:
      """Make sure that the active base url is correct"""
      assert ACTIVE_BASE_URL == MITO_PROD_BASE_URL
+
+ def test_devenv_streamlit_url() -> Any:
+     """Make sure that the streamlit urls are correct"""
+     assert MITO_STREAMLIT_DEV_BASE_URL == "https://fr12uvtfy5.execute-api.us-east-1.amazonaws.com"
+
+ def test_testenv_streamlit_url() -> Any:
+     """Make sure that the streamlit urls are correct"""
+     assert MITO_STREAMLIT_TEST_BASE_URL == "https://iyual08t6d.execute-api.us-east-1.amazonaws.com"
+
+ def test_streamlit_active_base_url() -> Any:
+     """Make sure that the active streamlit base url is correct"""
+     assert ACTIVE_STREAMLIT_BASE_URL == MITO_STREAMLIT_DEV_BASE_URL
+
+ def test_cognito_config() -> Any:
+     """Make sure that the Cognito configuration is correct"""
+     expected_config = {
+         'TOKEN_ENDPOINT': 'https://mito-app-auth.auth.us-east-1.amazoncognito.com/oauth2/token',
+         'CLIENT_ID': '6ara3u3l8sss738hrhbq1qtiqf',
+         'CLIENT_SECRET': '',
+         'REDIRECT_URI': 'http://localhost:8888/lab'
+     }
+
+     assert COGNITO_CONFIG_DEV == expected_config
+     assert ACTIVE_COGNITO_CONFIG == COGNITO_CONFIG_DEV
@@ -0,0 +1,12 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import pytest
+ from mito_ai.utils.telemetry_utils import PRINT_LOGS
+
+ def test_print_logs_is_false():
+     """
+     Test to ensure that PRINT_LOGS is set to False.
+     """
+     assert not PRINT_LOGS, "PRINT_LOGS should be False by default."
+
@@ -0,0 +1,2 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.