mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +49 -9
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +142 -67
- mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/{app_builder → app_deploy}/models.py +35 -22
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
- mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
- mito_ai/completions/completion_handlers/utils.py +99 -37
- mito_ai/completions/handlers.py +57 -20
- mito_ai/completions/message_history.py +9 -1
- mito_ai/completions/models.py +31 -7
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
- mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
- mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
- mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
- mito_ai/completions/prompt_builders/utils.py +72 -10
- mito_ai/completions/providers.py +81 -47
- mito_ai/constants.py +25 -24
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +44 -48
- mito_ai/log/handlers.py +10 -3
- mito_ai/log/urls.py +3 -3
- mito_ai/openai_client.py +30 -44
- mito_ai/path_utils.py +70 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
- mito_ai/tests/message_history/test_message_history_utils.py +103 -23
- mito_ai/tests/open_ai_utils_test.py +18 -22
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +2 -6
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +31 -3
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/test_anthropic_utils.py +6 -6
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/anthropic_utils.py +55 -121
- mito_ai/utils/create.py +17 -1
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +39 -94
- mito_ai/utils/message_history_utils.py +7 -4
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +38 -155
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/server_limits.py +1 -1
- mito_ai/utils/telemetry_utils.py +137 -5
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
- mito_ai-0.1.49.dist-info/RECORD +205 -0
- mito_ai/app_builder/handlers.py +0 -218
- mito_ai/tests/providers_test.py +0 -438
- mito_ai/tests/test_anthropic_client.py +0 -270
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
- mito_ai-0.1.33.dist-info/RECORD +0 -134
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
mito_ai/log/handlers.py
CHANGED
@@ -3,16 +3,22 @@
 
 from dataclasses import dataclass
 import json
-from typing import Any, Final
+from typing import Any, Final, Literal
 import tornado
 import os
 from jupyter_server.base.handlers import APIHandler
-from mito_ai.utils.telemetry_utils import log
+from mito_ai.utils.telemetry_utils import MITO_SERVER_KEY, USER_KEY, log
 
 
 class LogHandler(APIHandler):
     """Handler for logging"""
 
+    def initialize(self, key_type: Literal['mito_server_key', 'user_key']) -> None:
+        """Initialize the log handler"""
+
+        # The key_type is required so that we know if we can log pro users
+        self.key_type = key_type
+
     @tornado.web.authenticated
     def put(self) -> None:
         """Log an event"""
@@ -26,6 +32,7 @@ class LogHandler(APIHandler):
         log_event = data['log_event']
         params = data.get('params', {})
 
-        log(log_event, params)
+        key_type = MITO_SERVER_KEY if self.key_type == "mito_server_key" else USER_KEY
+        log(log_event, params, key_type=key_type)
 
 
mito_ai/log/urls.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any, List, Tuple
 from jupyter_server.utils import url_path_join
 from mito_ai.log.handlers import LogHandler
 
-def get_log_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
+def get_log_urls(base_url: str, key_type: str) -> List[Tuple[str, Any, dict]]:
     """Get all log related URL patterns.
 
     Args:
@@ -15,7 +15,7 @@ def get_log_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
         List of (url_pattern, handler_class, handler_kwargs) tuples
     """
     BASE_URL = base_url + "/mito-ai"
-
+
     return [
-        (url_path_join(BASE_URL, "log"), LogHandler, {}),
+        (url_path_join(BASE_URL, "log"), LogHandler, {"key_type": key_type}),
     ]
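Taken together with the handler change above, `key_type` now has to be supplied when the log URLs are registered. A minimal registration sketch, assuming the caller (in practice `mito_ai/__init__.py`, whose diff is not shown here) chooses between "user_key" and "mito_server_key" based on whether the user configured their own API key:

```python
from mito_ai.log.urls import get_log_urls

def register_log_handlers(server_app, user_has_own_key: bool) -> None:
    """Hypothetical helper: wires key_type through to LogHandler.initialize()."""
    web_app = server_app.web_app
    base_url = web_app.settings["base_url"]
    key_type = "user_key" if user_has_own_key else "mito_server_key"
    # get_log_urls returns (url_pattern, LogHandler, {"key_type": key_type}) tuples.
    web_app.add_handlers(".*$", get_log_urls(base_url, key_type))
```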
mito_ai/openai_client.py
CHANGED
@@ -4,6 +4,7 @@
 from __future__ import annotations
 from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union
 
+from mito_ai.utils.mito_server_utils import ProviderCompletionException
 import openai
 from openai.types.chat import ChatCompletionMessageParam
 from traitlets import Instance, Unicode, default, validate
@@ -36,8 +37,6 @@ from mito_ai.utils.telemetry_utils import (
 
 OPENAI_MODEL_FALLBACK = "gpt-4.1"
 
-OPENAI_FAST_MODEL = "gpt-4.1-nano"
-
 class OpenAIClient(LoggingConfigurable):
     """Provide AI feature through OpenAI services."""
 
@@ -222,26 +221,20 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
         return client
 
-    def
+    def _adjust_model_for_azure_or_ollama(self, model: str) -> str:
 
         # If they have set an Azure OpenAI model, then we always use it
         if is_azure_openai_configured() and constants.AZURE_OPENAI_MODEL is not None:
             self.log.debug(f"Resolving to Azure OpenAI model: {constants.AZURE_OPENAI_MODEL}")
             return constants.AZURE_OPENAI_MODEL
 
-        # Otherwise, we use the fast model for anything other than the agent mode
-        if response_format_info:
-            return OPENAI_FAST_MODEL
-
         # If they have set an Ollama model, then we use it
         if constants.OLLAMA_MODEL is not None:
             return constants.OLLAMA_MODEL
 
-        #
-
-        return model
+        # Otherwise, we use the model they provided
+        return model
 
-        return OPENAI_MODEL_FALLBACK
 
     async def request_completions(
         self,
@@ -263,39 +256,33 @@ This attribute is observed by the websocket provider to push the error to the cl
         # Reset the last error
         self.last_error = None
         completion = None
+
+        # Note: We don't catch exceptions here because we want them to bubble up
+        # to the providers file so we can handle all client exceptions in one place.
 
-
-
-
-
-
-
-
-
-
-
+        # Handle other providers as before
+        completion_function_params = get_open_ai_completion_function_params(
+            message_type, model, messages, False, response_format_info
+        )
+
+        # If they have set an Azure OpenAI or Ollama model, then we use it
+        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
+
+        if self._active_async_client is not None:
+            response = await self._active_async_client.chat.completions.create(**completion_function_params)
+            completion = response.choices[0].message.content or ""
+        else:
+            last_message_content = str(messages[-1].get("content", "")) if messages else None
+            completion = await get_ai_completion_from_mito_server(
+                last_message_content,
+                completion_function_params,
+                self.timeout,
+                self.max_retries,
+                message_type,
             )
 
-
-                response = await self._active_async_client.chat.completions.create(**completion_function_params)
-                completion = response.choices[0].message.content or ""
-            else:
-                last_message_content = str(messages[-1].get("content", "")) if messages else None
-                completion = await get_ai_completion_from_mito_server(
-                    last_message_content,
-                    completion_function_params,
-                    self.timeout,
-                    self.max_retries,
-                    message_type,
-                )
-
-                update_mito_server_quota(message_type)
+        return completion
 
-            return completion
-
-        except BaseException as e:
-            self.last_error = CompletionError.from_exception(e)
-            raise
 
     async def stream_completions(
         self,
@@ -315,9 +302,6 @@ This attribute is observed by the websocket provider to push the error to the cl
         # Reset the last error
         self.last_error = None
         accumulated_response = ""
-
-        # Validate that the model is supported.
-        model = self._resolve_model(model, response_format_info)
 
         # Send initial acknowledgment
         reply_fn(CompletionReply(
@@ -329,8 +313,10 @@
 
         # Handle other providers as before
         completion_function_params = get_open_ai_completion_function_params(
-            model, messages, True, response_format_info
+            message_type, model, messages, True, response_format_info
         )
+
+        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
 
         try:
             if self._active_async_client is not None:
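Two behavioural notes fall out of this diff: the fast-model override (`gpt-4.1-nano`) is gone, so the requested model is used unless Azure OpenAI or Ollama is configured, and `request_completions` no longer catches exceptions or sets `last_error` itself. A rough caller-side sketch of where that error handling is expected to live now (parameter names are inferred from the method body above, the import path is an assumption, and the real logic belongs to `providers.py`, which is not reproduced in this section):

```python
from mito_ai.completions.models import CompletionError  # import path assumed

async def request_with_error_capture(client, message_type, messages, model):
    """Hypothetical provider-layer wrapper: exceptions now bubble up out of the client."""
    try:
        return await client.request_completions(
            message_type=message_type,
            messages=messages,
            model=model,
        )
    except BaseException as e:
        # Mirrors the try/except that was removed from OpenAIClient.request_completions.
        client.last_error = CompletionError.from_exception(e)
        raise
```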
mito_ai/path_utils.py
ADDED
@@ -0,0 +1,70 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import NewType
+import os
+from mito_ai.utils.error_classes import StreamlitPreviewError
+
+# Type definitions for better type safety
+AbsoluteNotebookPath = NewType('AbsoluteNotebookPath', str)
+AbsoluteNotebookDirPath = NewType('AbsoluteNotebookDirPath', str)
+AbsoluteAppPath = NewType('AbsoluteAppPath', str)
+AppFileName = NewType("AppFileName", str)
+
+def get_absolute_notebook_path(notebook_path: str) -> AbsoluteNotebookPath:
+    """
+    Convert any notebook path to an absolute path.
+
+    Args:
+        notebook_path: Path to the notebook (can be relative or absolute)
+
+    Returns:
+        AbsoluteNotebookPath: The absolute path to the notebook
+
+    Raises:
+        ValueError: If the path is invalid or empty
+    """
+    if not notebook_path or not notebook_path.strip():
+        raise StreamlitPreviewError("Notebook path cannot be empty", 400)
+
+    absolute_path = os.path.abspath(notebook_path)
+    return AbsoluteNotebookPath(absolute_path)
+
+
+def get_absolute_notebook_dir_path(notebook_path: AbsoluteNotebookPath) -> AbsoluteNotebookDirPath:
+    """
+    Get the absolute directory containing the notebook.
+    """
+    return AbsoluteNotebookDirPath(os.path.dirname(notebook_path))
+
+def get_absolute_app_path(app_directory: AbsoluteNotebookDirPath, app_file_name: AppFileName) -> AbsoluteAppPath:
+    """
+    Get the absolute path to the app
+    """
+    return AbsoluteAppPath(os.path.join(app_directory, app_file_name))
+
+def get_app_file_name(notebook_id: str) -> AppFileName:
+    """
+    Converts the notebook id into the corresponding app id
+    """
+    mito_app_name = notebook_id.replace('mito-notebook-', 'mito-app-')
+    return AppFileName(f'{mito_app_name}.py')
+
+def does_app_path_exist(app_path: AbsoluteAppPath) -> bool:
+    """
+    Check if the app file exists
+    """
+    return os.path.exists(app_path)
+
+def does_notebook_id_have_corresponding_app(notebook_id: str, notebook_path: str) -> bool:
+    """
+    Given a notebook_id and raw notebook_path checks if the notebook has a corresponding
+    app by converting the notebook_path into an absolute path and converting the notebook_id
+    into an app name
+    """
+
+    app_file_name = get_app_file_name(notebook_id)
+    notebook_path = get_absolute_notebook_path(notebook_path)
+    app_directory = get_absolute_notebook_dir_path(notebook_path)
+    app_path = get_absolute_app_path(app_directory, app_file_name)
+    return does_app_path_exist(app_path)
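The new module encodes a simple naming convention: a notebook id of the form `mito-notebook-<id>` maps to an app file `mito-app-<id>.py` sitting next to the notebook. A small usage sketch (the id and path below are hypothetical):

```python
from mito_ai.path_utils import (
    does_notebook_id_have_corresponding_app,
    get_app_file_name,
)

print(get_app_file_name("mito-notebook-abc123"))
# -> "mito-app-abc123.py"

# Resolves the notebook path with os.path.abspath, derives the app file name,
# and checks whether that file exists in the notebook's directory.
print(does_notebook_id_have_corresponding_app("mito-notebook-abc123", "notebooks/analysis.ipynb"))
```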
mito_ai/streamlit_conversion/agent_utils.py
ADDED
@@ -0,0 +1,37 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Tuple
+import re
+from anthropic.types import MessageParam
+from mito_ai.streamlit_conversion.prompts.streamlit_system_prompt import streamlit_system_prompt
+from mito_ai.utils.anthropic_utils import stream_anthropic_completion_from_mito_server
+from mito_ai.streamlit_conversion.prompts.prompt_constants import MITO_TODO_PLACEHOLDER
+from mito_ai.completions.models import MessageType
+
+STREAMLIT_AI_MODEL = "claude-sonnet-4-5-20250929"
+
+def extract_todo_placeholders(agent_response: str) -> List[str]:
+    """Extract TODO placeholders from the agent's response"""
+    return [line.strip() for line in agent_response.split('\n') if MITO_TODO_PLACEHOLDER in line]
+
+async def get_response_from_agent(message_to_agent: List[MessageParam]) -> str:
+    """Gets the streaming response from the agent using the mito server"""
+    model = STREAMLIT_AI_MODEL
+    max_tokens = 64000 # TODO: If we move to haiku, we must reset this to 8192
+    temperature = 0.2
+
+    accumulated_response = ""
+    async for stream_chunk in stream_anthropic_completion_from_mito_server(
+        model = model,
+        max_tokens = max_tokens,
+        temperature = temperature,
+        system = streamlit_system_prompt,
+        messages = message_to_agent,
+        stream=True,
+        message_type=MessageType.STREAMLIT_CONVERSION,
+        reply_fn=None,
+        message_id=""
+    ):
+        accumulated_response += stream_chunk
+    return accumulated_response
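A short usage sketch of the two helpers above: the caller builds the Anthropic-style message list, awaits the streamed response, and then collects any `MITO_TODO_PLACEHOLDER` lines for follow-up passes. The driver and prompt below are hypothetical; the real orchestration lives in `streamlit_agent_handler.py`, which is not reproduced in this section.

```python
import asyncio
from typing import List
from anthropic.types import MessageParam
from mito_ai.streamlit_conversion.agent_utils import (
    extract_todo_placeholders,
    get_response_from_agent,
)

async def first_pass(conversion_prompt: str) -> List[str]:
    # MessageParam is Anthropic's {"role": ..., "content": ...} dict.
    messages: List[MessageParam] = [{"role": "user", "content": conversion_prompt}]
    response = await get_response_from_agent(messages)
    # Each remaining TODO is handled later in a dedicated prompt.
    return extract_todo_placeholders(response)

# todos = asyncio.run(first_pass("<prompt built from the notebook>"))  # hypothetical driver
```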
mito_ai/streamlit_conversion/prompts/prompt_constants.py
ADDED
@@ -0,0 +1,172 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+MITO_TODO_PLACEHOLDER = "# MITO_TODO_PLACEHOLDER"
+
+search_replace_instructions = f"""
+RESPONSE FORMAT: You can edit the existing code using the **SEARCH_REPLACE format** for exact string matching and replacement.
+
+**STRUCTURE:**
+```search_replace
+>>>>>>> SEARCH
+[exact code currently in the file]
+=======
+[new code to replace it with]
+<<<<<<< REPLACE
+```
+
+**COMPONENTS:**
+```search_replace - This is the start of the search/replace block
+- `>>>>>>> SEARCH` - Exact text that EXISTS NOW in the file (7 chevrons)
+- `=======` - Separator between the search and replace blocks (7 equals signs)
+- `<<<<<<< REPLACE` - Replacement text (7 chevrons)
+
+---
+
+**CRITICAL RULES - READ CAREFULLY:**
+
+1. **SEARCH = CURRENT STATE ONLY**
+   - The SEARCH block must contain ONLY code that currently exists in the file
+   - NEVER include new code, future code, or code you wish existed in the SEARCH block
+   - Copy exact text from the current file, character-for-character
+
+2. **EXACT MATCHING REQUIRED**
+   - Every space, tab, newline must match perfectly
+   - Preserve exact indentation (spaces vs tabs)
+   - Include trailing newlines if present
+   - No approximations - even one character difference will fail
+
+3. **SIZE LIMITS**
+   - There are no size limits to each search/replace block, however, it is generally preferable to keep the SEARCH blocks small and focused on one change.
+   - For large changes, use multiple smaller search/replace blocks
+
+4. **UNIQUENESS**
+   - Include enough context to make the SEARCH block unique
+   - If text appears multiple times, add surrounding lines
+   - Ensure there's only ONE match in the file
+
+5. **VERIFICATION CHECKLIST** (before generating each block):
+   ✓ Is every line in my SEARCH block currently in the file?
+   ✓ Did I copy the exact spacing and whitespace?
+   ✓ Will this match exactly once?
+
+6. **SEARCH REPLACE BLOCK STRUCTURE**
+   - You must adhere to to the exact search_replace structure as shown in the examples.
+
+---
+
+**MULTIPLE REPLACEMENTS:**
+- You can include multiple search/replace blocks in one response
+- Each block is independent and processed separately
+- Use separate ```search_replace blocks for each change
+
+<Example 1: Updating existing content>
+
+```search_replace
+>>>>>>> SEARCH
+st.title("Old Title")
+=======
+st.title("New Title")
+<<<<<<< REPLACE
+```
+</Example 1>
+
+<Example 2: Adding new content>
+
+```search_replace
+>>>>>>> SEARCH
+st.title("My App")
+=======
+st.title("My App")
+st.header("Welcome")
+st.write("This is a test app")
+<<<<<<< REPLACE
+```
+</Example 2>
+
+<Example 3: Deleting existing content>
+
+```search_replace
+>>>>>>> SEARCH
+st.write("Old message")
+=======
+<<<<<<< REPLACE
+```
+</Example 3>
+
+<Example 4: Multiple replacements in one response>
+
+```search_replace
+>>>>>>> SEARCH
+st.title("Old Title")
+=======
+st.title("New Title")
+<<<<<<< REPLACE
+```
+
+```search_replace
+>>>>>>> SEARCH
+st.write("Old message")
+=======
+st.write("New message")
+<<<<<<< REPLACE
+```
+</Example 4>
+
+<Example 5: Using extra context to identify the correct code to replace>
+
+In the below example, assume that the code st.write("Old message") appears multiple times in the file, so we use extra context lines to identify the correct code to replace.
+
+```search_replace
+>>>>>>> SEARCH
+# This is a unique comment
+st.write("Old message")
+=======
+# This is a unique comment
+st.write("New message")
+<<<<<<< REPLACE
+```
+</Example 5>
+
+<Example 6: Search/replace while respecting whitespace and indentation>
+
+```search_replace
+>>>>>>> SEARCH
+data_list = [
+    {{'id': 1, 'name': 'Item A'}},
+    {MITO_TODO_PLACEHOLDER}: Add remaining entries from notebook
+]
+=======
+data_list = [
+    {{'id': 1, 'name': 'Item A'}},
+    {{'id': 2, 'name': 'Item B'}},
+    {{'id': 3, 'name': 'Item C'}},
+    {{'id': 4, 'name': 'Item D'}}
+]
+<<<<<<< REPLACE
+```
+</Example 6>
+
+<Example 7: Tab structure changes>
+
+```search_replace
+>>>>>>> SEARCH
+tab1, tab2 = st.tabs(["Cat", "Dog"])
+
+with tab1:
+    st.header("A cat")
+    st.image("https://static.streamlit.io/examples/cat.jpg", width=200)
+with tab2:
+    st.header("A dog")
+    st.image("https://static.streamlit.io/examples/dog.jpg", width=200)
+=======
+st.header("A cat")
+st.image("https://static.streamlit.io/examples/cat.jpg", width=200)
+st.header("A dog")
+st.image("https://static.streamlit.io/examples/dog.jpg", width=200)
+<<<<<<< REPLACE
+```
+</Example 7>
+
+Your response must consist **only** of valid search_replace blocks.
+"""
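The instructions above define the wire format the agent must emit; the parsing and application side ships separately in `search_replace_utils.py` (listed in the file summary but not reproduced in this section). An illustrative sketch of how such blocks could be parsed and applied with exact string matching, which is not the package's own implementation:

```python
import re

# Matches the SEARCH and REPLACE payloads described above; the REPLACE payload
# may be empty (Example 3, deletion).
BLOCK_PATTERN = re.compile(
    r">>>>>>> SEARCH\n(.*?)\n=======\n(.*?)<<<<<<< REPLACE",
    re.DOTALL,
)

def apply_search_replace_blocks(app_code: str, agent_response: str) -> str:
    """Apply each block with exact matching, one occurrence per block."""
    for search_text, replace_text in BLOCK_PATTERN.findall(agent_response):
        app_code = app_code.replace(search_text, replace_text.rstrip("\n"), 1)
    return app_code
```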
mito_ai/streamlit_conversion/prompts/prompt_utils.py
ADDED
@@ -0,0 +1,10 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+def add_line_numbers_to_code(code: str) -> str:
+    """Add line numbers to the code"""
+    code_with_line_numbers = ""
+    for i, line in enumerate(code.split('\n'), 1):
+        code_with_line_numbers += f"{i:3d}: {line}\n"
+
+    return code_with_line_numbers
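For reference, this helper produces the numbered listing that the error-correction and finish-TODO prompts below embed, so the model can refer to specific lines:

```python
from mito_ai.streamlit_conversion.prompts.prompt_utils import add_line_numbers_to_code

print(add_line_numbers_to_code("import streamlit as st\nst.title('Hello')"))
#   1: import streamlit as st
#   2: st.title('Hello')
```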
mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py
ADDED
@@ -0,0 +1,46 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List
+from mito_ai.streamlit_conversion.prompts.prompt_constants import MITO_TODO_PLACEHOLDER
+
+def get_streamlit_app_creation_prompt(notebook: List[dict]) -> str:
+    """
+    This prompt is used to create a streamlit app from a notebook.
+    """
+    return f"""Convert the following Jupyter notebook into a Streamlit application.
+
+GOAL: Create a complete, runnable Streamlit app that accurately represents the notebook. It must completely convert the notebook.
+
+TODO PLACEHOLDER RULES:
+If you decide to leave any TODOs, you must mark them with {MITO_TODO_PLACEHOLDER}. You should use {MITO_TODO_PLACEHOLDER} instead of comments like the following:
+- # ... (include all mappings from the notebook)
+- # ... (include all violation codes from the notebook)
+- # Fill in the rest of the code here
+- # TODO: Add more code here
+- # TODO: Add the visualization code here
+
+For each TODO, use this exact format:
+{MITO_TODO_PLACEHOLDER}: <specific description of what needs to be added>
+
+IMPORTANT:
+- The app must still be RUNNABLE even with placeholders
+- Include enough sample data to show the structure
+- Do NOT use placeholders for small/medium content - include it directly
+- Do NOT use placeholders for file paths, imports, or core logic
+- Only use placeholders when absolutely necessary. Add all of the content directly as much as possible.
+
+<Example>
+If the notebook has a list of dictionaries with 50 entries, you would write:
+
+data = [
+    {{'id': 1, 'name': 'Item A', 'category': 'Type 1', 'value': 100}},
+    {{'id': 2, 'name': 'Item B', 'category': 'Type 2', 'value': 200}},
+    {MITO_TODO_PLACEHOLDER}: Add remaining entries from the data list
+]
+</Example>
+
+Notebook to convert:
+
+{notebook}
+"""
mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py
ADDED
@@ -0,0 +1,28 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from mito_ai.streamlit_conversion.prompts.prompt_constants import search_replace_instructions
+from mito_ai.streamlit_conversion.prompts.prompt_utils import add_line_numbers_to_code
+
+def get_streamlit_error_correction_prompt(error: str, streamlit_app_code: str) -> str:
+
+    existing_streamlit_app_code_with_line_numbers = add_line_numbers_to_code(streamlit_app_code)
+
+    return f"""You've created a Streamlit app, but it has an error in it when you try to run it.
+
+Your job is to fix the error now. Only fix the specific error that you are instructed to fix now. Do not fix other error that that you anticipate. You will be asked to fix other errors later.
+
+{search_replace_instructions}
+
+===============================================
+
+EXISTING STREAMLIT APP:
+{existing_streamlit_app_code_with_line_numbers}
+
+===============================================
+
+Please create a search/replace block that corrects this error. Please keep your fix concise:
+{error}
+
+"""
+
mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py
ADDED
@@ -0,0 +1,45 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List
+from mito_ai.streamlit_conversion.prompts.prompt_constants import MITO_TODO_PLACEHOLDER, search_replace_instructions
+from mito_ai.streamlit_conversion.prompts.prompt_utils import add_line_numbers_to_code
+
+def get_finish_todo_prompt(notebook: List[dict], existing_streamlit_app_code: str, todo_placeholder: str) -> str:
+
+    existing_streamlit_app_code_with_line_numbers = add_line_numbers_to_code(existing_streamlit_app_code)
+
+    return f"""You've already created the first draft of a Streamlit app representation of a Jupyter notebook, but you left yourself some TODOs marked as `{MITO_TODO_PLACEHOLDER}`.
+
+**CRITICAL COMPLETION REQUIREMENT:**
+You have ONE and ONLY ONE opportunity to complete this TODO. If you do not finish the entire task completely, the application will be broken and unusable. This is your final chance to get it right.
+
+**COMPLETION RULES:**
+1. **NEVER leave partial work** - If the TODO asks for a list with 100 items, provide ALL 100 items
+2. **NEVER use placeholders** - This is your only opportunity to fulfill this TODO, so do not leave yourself another TODO.
+3. **NEVER assume "good enough"** - Complete the task to 100% satisfaction
+4. **If the task seems large, that's exactly why it needs to be done now** - This is your only chance
+
+**HOW TO DETERMINE IF TASK IS COMPLETE:**
+- If building a list/dictionary: Include ALL items that should be in the final data structure
+- If creating functions: Implement ALL required functionality
+- If converting a visualization: Copy over ALL of the visualization code from the notebook, including all styling and formatting.
+
+{search_replace_instructions}
+
+===============================================
+
+Input Notebook that you are converting into the Streamlit app:
+{notebook}
+
+===============================================
+
+EXISTING STREAMLIT APP:
+{existing_streamlit_app_code_with_line_numbers}
+
+===============================================
+
+Please make the changes for this TODO. Only focus on this one TODO right now. You will be asked to fix others later:
+{todo_placeholder}
+
+"""
mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py
ADDED
@@ -0,0 +1,56 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+streamlit_system_prompt = """You are a code conversion specialist who converts Jupyter notebooks into Streamlit applications with ABSOLUTE FIDELITY.
+
+ROLE AND EXPERTISE:
+- Expert in Python, Jupyter notebooks, Streamlit, and data visualization
+- Experienced in creating executive-ready dashboards for business stakeholders
+- Skilled in translating technical analysis into clear, interactive presentations
+
+TASK REQUIREMENTS:
+1. Convert Jupyter notebook content into a complete Streamlit application (app.py)
+2. Preserve ALL outputs from code cells and markdown cells as they appear in the notebook
+3. Maintain the logical flow and structure of the original analysis
+4. Create an executive-friendly dashboard suitable for company leadership
+
+STREAMLIT IMPLEMENTATION GUIDELINES:
+- Use appropriate Streamlit components (st.title, st.header, st.subheader, st.markdown, etc.)
+- Display all visualizations using st.pyplot(), st.plotly_chart(), or st.altair_chart() as appropriate
+- Do not convert database connections into Streamlit's secret.toml format. If the user inlined their database credentials, are importing from an environment variable, or reading from a connections file, assume that same approach will work in the streamlit app.
+- Show dataframes and tables using st.dataframe() or st.table()
+- Include all text explanations and insights from markdown cells
+- Add interactive elements where beneficial (filters, selectors, etc.)
+- Ensure professional styling and layout suitable for executives
+- Just create the streamlit app code, do not include a _main_ function block. The file will be run directly using `streamlit run app.py`.
+
+CRITICAL REQUIREMENTS:
+1. **PRESERVE ALL CODE EXACTLY**: Every line of code, every data structure, every import must be included in full
+2. **NO PLACEHOLDERS**: Never use comments like "# Add more data here" or "# Fill in the rest"
+3. **NO SIMPLIFICATION**: Do not replace actual data with sample data or hardcoded examples
+4. **COMPLETE DATA STRUCTURES**: If a notebook has a 1000-line dictionary, include all 1000 lines
+5. **PRESERVE DATA LOADING**: If the notebook reads from files, the Streamlit app must read from the same files
+6. **NO IMPROVIZAITION**: Do not provide your own interpretations of the analysis. Just convert the existing analysis into a streamlit app.
+
+STYLE GUIDELINES:
+- Create a professional, executive-friendly dashboard
+- If there are variables in the notebook that the streamlit app viewer would likely want to configure, then use the appropriate streamlit component to allow them to do so. For examples, if the notebook has a variable called "start_date" and "end_date", then use the st.date_input component to allow the user to select the start and end dates.
+- Do not use emojis unless they are in the notebook already
+- Do not modify the graphs or analysis. If the notebook has a graph, use the same graph in the streamlit app.
+- Always include the following code at the top of the file so the user does not use the wrong deploy button
+```python
+st.markdown(\"\"\"
+<style>
+#MainMenu {visibility: hidden;}
+.stAppDeployButton {display:none;}
+footer {visibility: hidden;}
+.stMainBlockContainer {padding: 2rem 1rem 2rem 1rem;}
+</style>
+\"\"\", unsafe_allow_html=True)
+```
+
+OUTPUT FORMAT:
+- Output the complete, runnable app.py file.
+- Do not output any extra text, just give the python code.
+
+"""