mito-ai 0.1.40__py3-none-any.whl → 0.1.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (64)
  1. mito_ai/__init__.py +19 -6
  2. mito_ai/_version.py +1 -1
  3. mito_ai/app_builder/handlers.py +1 -2
  4. mito_ai/app_manager/__init__.py +4 -0
  5. mito_ai/app_manager/handlers.py +129 -0
  6. mito_ai/app_manager/models.py +58 -0
  7. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  8. mito_ai/completions/completion_handlers/chat_completion_handler.py +2 -2
  9. mito_ai/completions/completion_handlers/utils.py +77 -37
  10. mito_ai/completions/handlers.py +1 -1
  11. mito_ai/completions/message_history.py +9 -1
  12. mito_ai/completions/models.py +3 -1
  13. mito_ai/completions/prompt_builders/agent_execution_prompt.py +2 -0
  14. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  15. mito_ai/completions/prompt_builders/agent_system_message.py +17 -0
  16. mito_ai/completions/prompt_builders/utils.py +7 -0
  17. mito_ai/constants.py +3 -2
  18. mito_ai/file_uploads/__init__.py +3 -0
  19. mito_ai/file_uploads/handlers.py +225 -0
  20. mito_ai/file_uploads/urls.py +21 -0
  21. mito_ai/openai_client.py +1 -1
  22. mito_ai/tests/completions/completion_handlers_utils_test.py +51 -0
  23. mito_ai/tests/file_uploads/__init__.py +2 -0
  24. mito_ai/tests/file_uploads/test_handlers.py +267 -0
  25. mito_ai/tests/message_history/test_message_history_utils.py +57 -4
  26. mito_ai/utils/mito_server_utils.py +7 -0
  27. mito_ai/utils/server_limits.py +1 -1
  28. mito_ai/utils/telemetry_utils.py +26 -9
  29. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  30. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  31. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  32. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js +2662 -1144
  33. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.a9a35b6fcc54a7bcb32c.js.map +1 -0
  34. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  35. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  36. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js +50 -26
  37. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.c7d9d8635826165de52e.js.map +1 -0
  38. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  39. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  40. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  41. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  42. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  43. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  44. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  45. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  46. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  47. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  48. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  49. mito_ai-0.1.42.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  50. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/METADATA +1 -1
  51. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/RECORD +61 -40
  52. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.55d9f8ca386d87856d2d.js.map +0 -1
  53. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.264103d9addd1e166113.js.map +0 -1
  54. mito_ai-0.1.40.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  55. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  56. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  57. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  58. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  59. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  60. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  61. {mito_ai-0.1.40.data → mito_ai-0.1.42.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  62. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/WHEEL +0 -0
  63. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/entry_points.txt +0 -0
  64. {mito_ai-0.1.40.dist-info → mito_ai-0.1.42.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py CHANGED
@@ -14,13 +14,20 @@ from mito_ai.settings.urls import get_settings_urls
  from mito_ai.rules.urls import get_rules_urls
  from mito_ai.auth.urls import get_auth_urls
  from mito_ai.streamlit_preview.urls import get_streamlit_preview_urls
+ from mito_ai.app_manager.handlers import AppManagerHandler
+ from mito_ai.file_uploads.urls import get_file_uploads_urls
+
+ # Force Matplotlib to use the Jupyter inline backend.
+ # Background: importing Streamlit sets os.environ["MPLBACKEND"] = "Agg" very early.
+ # In a Jupyter kernel, that selects a non‑interactive canvas and can trigger:
+ # "UserWarning: FigureCanvasAgg is non-interactive, and thus cannot be shown"
+ # which prevents figures from rendering in notebook outputs.
+ # We preempt this by selecting the canonical Jupyter inline backend BEFORE any
+ # Matplotlib import, so figures render inline reliably. This must run very early.
+ # See: https://github.com/streamlit/streamlit/issues/9640
 
- # Sometimes matplotlib figures do not show up in the notebook with this warning:
- # UserWarning: FigureCanvasAgg is non-interactive, and thus cannot be shown
- # I believe that streamlit is reconfiguring the matplotlib settings and this is happening as a result.
- # For now, we just set the backend to inline, so that the figures show up again
  import os
- os.environ['MPLBACKEND'] = 'inline'
+ os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"
 
  try:
      from _version import __version__
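
As a quick sanity check (not part of the diff), a minimal sketch of what the new backend value does, assuming matplotlib and matplotlib-inline are installed as they are in a Jupyter environment:

    import os
    # Must run before the first matplotlib import, which is when MPLBACKEND is read.
    os.environ["MPLBACKEND"] = "module://matplotlib_inline.backend_inline"

    import matplotlib
    import matplotlib.pyplot as plt

    print(matplotlib.get_backend())  # module://matplotlib_inline.backend_inline
    plt.plot([1, 2, 3])
    plt.show()  # renders inline in a Jupyter kernel instead of warning about FigureCanvasAgg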
@@ -77,6 +84,11 @@ def _load_jupyter_server_extension(server_app) -> None: # type: ignore
              url_path_join(base_url, "mito-ai", "version-check"),
              VersionCheckHandler,
              {},
+         ),
+         (
+             url_path_join(base_url, "mito-ai", "app-manager"),
+             AppManagerHandler,
+             {}
          )
      ]
 
@@ -87,6 +99,7 @@ def _load_jupyter_server_extension(server_app) -> None: # type: ignore
      handlers.extend(get_log_urls(base_url, open_ai_provider.key_type)) # type: ignore
      handlers.extend(get_auth_urls(base_url)) # type: ignore
      handlers.extend(get_streamlit_preview_urls(base_url)) # type: ignore
-
+     handlers.extend(get_file_uploads_urls(base_url)) # type: ignore
+
      web_app.add_handlers(host_pattern, handlers)
      server_app.log.info("Loaded the mito_ai server extension")
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
  # This file is auto-generated by Hatchling. As such, do not:
  # - modify
  # - track in version control e.g. be sure to add to .gitignore
- __version__ = VERSION = '0.1.40'
+ __version__ = VERSION = '0.1.42'
mito_ai/app_builder/handlers.py CHANGED
@@ -156,9 +156,8 @@ class AppBuilderHandler(BaseWebSocketHandler):
          success_flag, app_path_result, result_message = await streamlit_handler(notebook_path)
          if not success_flag or app_path_result is None:
              raise Exception(result_message)
-         app_path = app_path_result
 
-         deploy_url = await self._deploy_app(app_path, jwt_token)
+         deploy_url = await self._deploy_app(app_directory, jwt_token)
 
          # Send the response
          self.reply(BuildAppReply(
mito_ai/app_manager/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ """App manager module for Mito AI."""
mito_ai/app_manager/handlers.py ADDED
@@ -0,0 +1,129 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ # app_manager/handlers.py
+ import os
+ import time
+ import logging
+ from typing import Union
+ from mito_ai.utils.websocket_base import BaseWebSocketHandler
+ from mito_ai.app_manager.models import (
+     App,
+     AppManagerError,
+     ManageAppRequest,
+     ManageAppReply,
+     ErrorMessage,
+     MessageType
+ )
+ from mito_ai.constants import ACTIVE_STREAMLIT_BASE_URL
+ from mito_ai.logger import get_logger
+ import requests
+
+
+ class AppManagerHandler(BaseWebSocketHandler):
+     """Handler for app management requests."""
+
+     def initialize(self) -> None:
+         """Initialize the WebSocket handler."""
+         super().initialize()
+         self.log.debug("Initializing app manager websocket connection %s", self.request.path)
+
+     @property
+     def log(self) -> logging.Logger:
+         """Use Mito AI logger."""
+         return get_logger()
+
+     async def on_message(self, message: Union[str, bytes]) -> None:
+         """Handle incoming messages on the WebSocket."""
+         start = time.time()
+
+         # Convert bytes to string if needed
+         if isinstance(message, bytes):
+             message = message.decode('utf-8')
+
+         self.log.debug("App manager message received: %s", message)
+
+         try:
+             # Ensure message is a string before parsing
+             if not isinstance(message, str):
+                 raise ValueError("Message must be a string")
+
+             parsed_message = self.parse_message(message)
+             message_type = parsed_message.get('type')
+             message_id = parsed_message.get('message_id')
+
+             if message_type == MessageType.MANAGE_APP.value:
+                 # Handle manage app request
+                 manage_app_request = ManageAppRequest(**parsed_message)
+                 await self._handle_manage_app(manage_app_request)
+             else:
+                 self.log.error(f"Unknown message type: {message_type}")
+                 error_response = ErrorMessage(
+                     error_type="InvalidRequest",
+                     title=f"Unknown message type: {message_type}",
+                     message_id=message_id
+                 )
+                 self.reply(error_response)
+
+         except ValueError as e:
+             self.log.error("Invalid app manager request", exc_info=e)
+             error_response = ErrorMessage(
+                 error_type=type(e).__name__,
+                 title=str(e),
+                 message_id=parsed_message.get('message_id') if 'parsed_message' in locals() else None
+             )
+             self.reply(error_response)
+         except Exception as e:
+             self.log.error("Error handling app manager message", exc_info=e)
+             error_response = ErrorMessage(
+                 error_type=type(e).__name__,
+                 title=str(e),
+                 message_id=parsed_message.get('message_id') if 'parsed_message' in locals() else None
+             )
+             self.reply(error_response)
+
+         latency_ms = round((time.time() - start) * 1000)
+         self.log.info(f"App manager handler processed in {latency_ms} ms.")
+
+     async def _handle_manage_app(self, request: ManageAppRequest) -> None:
+         """Handle a manage app request with hardcoded data."""
+         try:
+             jwt_token = request.jwt_token
+             headers = {}
+             if jwt_token and jwt_token != 'placeholder-jwt-token':
+                 headers['Authorization'] = f'Bearer {jwt_token}'
+             else:
+                 self.log.warning("No JWT token provided for API request")
+                 return
+
+             manage_apps_response = requests.get(f"{ACTIVE_STREAMLIT_BASE_URL}/manage-apps",
+                                                 headers=headers)
+             manage_apps_response.raise_for_status()
+
+             apps_data = manage_apps_response.json()
+
+             # Create successful response
+             reply = ManageAppReply(
+                 apps=apps_data,
+                 message_id=request.message_id
+             )
+             self.reply(reply)
+
+         except Exception as e:
+             self.log.error(f"Error handling manage app request: {e}", exc_info=e)
+
+             try:
+                 error = AppManagerError.from_exception(e)
+             except Exception:
+                 error = AppManagerError(
+                     error_type=type(e).__name__,
+                     title=str(e)
+                 )
+
+             # Return error response
+             error_reply = ManageAppReply(
+                 apps=[],
+                 error=error,
+                 message_id=request.message_id
+             )
+             self.reply(error_reply)
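
For orientation (not part of the diff), a hedged sketch of the message exchange this handler expects over the app-manager websocket; the IDs and token values are illustrative, only the field names and shapes come from the models it uses:

    import json

    # Client -> server: a manage-app request (type must match MessageType.MANAGE_APP.value).
    request = {
        "type": "manage-app",
        "message_id": "illustrative-id-123",   # echoed back in the reply
        "jwt_token": "eyJ...",                  # anything other than 'placeholder-jwt-token'
    }
    # websocket.send(json.dumps(request))

    # Server -> client on success, mirroring ManageAppReply:
    # {"type": "manage-app", "apps": [...], "error": null, "message_id": "illustrative-id-123"}
    # On failure the handler replies with apps=[] and an AppManagerError payload instead.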
mito_ai/app_manager/models.py ADDED
@@ -0,0 +1,58 @@
+ # Copyright (c) Saga Inc.
+
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import List, Optional
+
+ class MessageType(str, Enum):
+     """Types of app manager messages."""
+     MANAGE_APP = "manage-app"
+
+
+ @dataclass(frozen=True)
+ class ManageAppRequest:
+     """Request to manage apps."""
+     type: str = "manage-app"
+     jwt_token: Optional[str] = None
+     message_id: Optional[str] = None
+
+ @dataclass(frozen=True)
+ class App:
+     """App information."""
+     app_name: str
+     url: str
+     status: str
+     created_at: str
+
+ @dataclass(frozen=True)
+ class AppManagerError:
+     """Error information for app manager operations."""
+     error_type: str
+     title: str
+     traceback: Optional[str] = None
+
+     @classmethod
+     def from_exception(cls, exc: Exception) -> 'AppManagerError':
+         return cls(
+             error_type=type(exc).__name__,
+             title=str(exc),
+             traceback=str(exc)
+         )
+
+ @dataclass(frozen=True)
+ class ManageAppReply:
+     """Reply to a manage app request."""
+     type: str = "manage-app"
+     apps: List[App] = field(default_factory=list)
+     error: Optional[AppManagerError] = None
+     message_id: Optional[str] = None
+
+ @dataclass(frozen=True)
+ class ErrorMessage:
+     """Error message."""
+     error_type: str
+     title: str
+     traceback: Optional[str] = None
+     message_id: Optional[str] = None
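
A small, illustrative usage sketch of these dataclasses (all values are made up):

    app = App(
        app_name="sales-dashboard",
        url="https://example.com/apps/sales-dashboard",
        status="running",
        created_at="2025-01-01T00:00:00Z",
    )
    ok_reply = ManageAppReply(apps=[app], message_id="illustrative-id-123")

    try:
        raise RuntimeError("manage-apps endpoint unreachable")
    except Exception as exc:
        # from_exception copies the exception type and message into the error payload.
        err_reply = ManageAppReply(
            apps=[],
            error=AppManagerError.from_exception(exc),
            message_id="illustrative-id-123",
        )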
mito_ai/completions/completion_handlers/agent_execution_handler.py CHANGED
@@ -38,7 +38,7 @@ class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
          display_prompt = metadata.input
 
          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
 
          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
mito_ai/completions/completion_handlers/chat_completion_handler.py CHANGED
@@ -47,7 +47,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
          display_prompt = f"```python{metadata.activeCellCode or ''}```{metadata.input}"
 
          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
 
@@ -110,7 +110,7 @@ class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
          display_prompt = f"```python{metadata.activeCellCode or ''}```{metadata.input}"
 
          # Add the prompt to the message history
-         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput)
+         new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.base64EncodedUploadedImage)
          new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
          await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
 
mito_ai/completions/completion_handlers/utils.py CHANGED
@@ -6,25 +6,33 @@ from mito_ai.completions.message_history import GlobalMessageHistory
  from mito_ai.completions.models import ThreadID
  from mito_ai.completions.providers import OpenAIProvider
  from openai.types.chat import ChatCompletionMessageParam
- from mito_ai.completions.prompt_builders.chat_system_message import create_chat_system_message_prompt
- from mito_ai.completions.prompt_builders.agent_system_message import create_agent_system_message_prompt
+ from mito_ai.completions.prompt_builders.chat_system_message import (
+     create_chat_system_message_prompt,
+ )
+ from mito_ai.completions.prompt_builders.agent_system_message import (
+     create_agent_system_message_prompt,
+ )
+
 
  async def append_chat_system_message(
-     message_history: GlobalMessageHistory,
-     model: str,
-     provider: OpenAIProvider,
-     thread_id: ThreadID
+     message_history: GlobalMessageHistory,
+     model: str,
+     provider: OpenAIProvider,
+     thread_id: ThreadID,
  ) -> None:
-
+
      # If the system message already exists, do nothing
-     if any(msg["role"] == "system" for msg in message_history.get_ai_optimized_history(thread_id)):
+     if any(
+         msg["role"] == "system"
+         for msg in message_history.get_ai_optimized_history(thread_id)
+     ):
          return
-
+
      system_message_prompt = create_chat_system_message_prompt()
 
      system_message: ChatCompletionMessageParam = {
          "role": "system",
-         "content": system_message_prompt
+         "content": system_message_prompt,
      }
 
      await message_history.append_message(
@@ -32,54 +40,86 @@
          display_message=system_message,
          model=model,
          llm_provider=provider,
-         thread_id=thread_id
+         thread_id=thread_id,
      )
 
+
  async def append_agent_system_message(
-     message_history: GlobalMessageHistory,
-     model: str,
-     provider: OpenAIProvider,
-     thread_id: ThreadID,
-     isChromeBrowser: bool
+     message_history: GlobalMessageHistory,
+     model: str,
+     provider: OpenAIProvider,
+     thread_id: ThreadID,
+     isChromeBrowser: bool,
  ) -> None:
-
+
      # If the system message already exists, do nothing
-     if any(msg["role"] == "system" for msg in message_history.get_ai_optimized_history(thread_id)):
+     if any(
+         msg["role"] == "system"
+         for msg in message_history.get_ai_optimized_history(thread_id)
+     ):
          return
-
+
      system_message_prompt = create_agent_system_message_prompt(isChromeBrowser)
-
+
      system_message: ChatCompletionMessageParam = {
          "role": "system",
-         "content": system_message_prompt
+         "content": system_message_prompt,
      }
-
+
      await message_history.append_message(
          ai_optimized_message=system_message,
          display_message=system_message,
          model=model,
          llm_provider=provider,
-         thread_id=thread_id
+         thread_id=thread_id,
      )
-
- def create_ai_optimized_message(text: str, base64EncodedActiveCellOutput: Optional[str] = None) -> ChatCompletionMessageParam:
+
+
+ def create_ai_optimized_message(
+     text: str,
+     base64EncodedActiveCellOutput: Optional[str] = None,
+     base64EncodedUploadedImage: Optional[str] = None,
+ ) -> ChatCompletionMessageParam:
 
      message_content: Union[str, List[Dict[str, Any]]]
-     if base64EncodedActiveCellOutput is not None and base64EncodedActiveCellOutput != '':
-         message_content = [
+     has_uploaded_image = (
+         base64EncodedUploadedImage is not None and base64EncodedUploadedImage != ""
+     )
+     has_active_cell_output = (
+         base64EncodedActiveCellOutput is not None
+         and base64EncodedActiveCellOutput != ""
+     )
+
+     if has_uploaded_image or has_active_cell_output:
+         message_content = [
              {
                  "type": "text",
                  "text": text,
-             },
-             {
-                 "type": "image_url",
-                 "image_url": {"url": f"data:image/png;base64,{base64EncodedActiveCellOutput}"},
              }
-         ]
+         ]
+
+         if has_uploaded_image:
+             message_content.append(
+                 {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/png;base64,{base64EncodedUploadedImage}"
+                     },
+                 }
+             )
+
+         if has_active_cell_output:
+             message_content.append(
+                 {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:image/png;base64,{base64EncodedActiveCellOutput}"
+                     },
+                 }
+             )
      else:
          message_content = text
-
-     return cast(ChatCompletionMessageParam, {
-         "role": "user",
-         "content": message_content
-     })
+
+     return cast(
+         ChatCompletionMessageParam, {"role": "user", "content": message_content}
+     )
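
A hedged usage sketch of the rewritten helper (base64 strings are placeholders): when either image is present the content becomes a multi-part list, with the text part first, then the uploaded image, then the active cell output.

    msg = create_ai_optimized_message(
        "Why does this chart look wrong?",
        base64EncodedActiveCellOutput="iVBORw0KGgoAAA...",  # placeholder
        base64EncodedUploadedImage="R0lGODlhAQABAAAA...",    # placeholder
    )
    # msg == {
    #     "role": "user",
    #     "content": [
    #         {"type": "text", "text": "Why does this chart look wrong?"},
    #         {"type": "image_url", "image_url": {"url": "data:image/png;base64,R0lGODlhAQABAAAA..."}},
    #         {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KGgoAAA..."}},
    #     ],
    # }
    # With neither image supplied, content stays a plain string, as before.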
mito_ai/completions/handlers.py CHANGED
@@ -46,7 +46,7 @@ from mito_ai.completions.completion_handlers.agent_execution_handler import get_
  from mito_ai.completions.completion_handlers.agent_auto_error_fixup_handler import get_agent_auto_error_fixup_completion
  from mito_ai.utils.telemetry_utils import identify
 
- FALLBACK_MODEL = "gpt-5" # Default model to use for safety
+ FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
 
  # The GlobalMessageHistory is responsible for updating the message histories stored in the .mito/ai-chats directory.
  # We create one GlobalMessageHistory per backend server instance instead of one per websocket connection so that the
mito_ai/completions/message_history.py CHANGED
@@ -251,7 +251,15 @@ class GlobalMessageHistory:
          with self._lock:
              if thread_id not in self._chat_threads:
                  return []
-             return self._chat_threads[thread_id].display_history
+
+             thread = self._chat_threads[thread_id]
+             display_history = thread.display_history
+
+             # When we get a thread, update it's last interaction time so that if the
+             # user refreshes their browser, this chat will re-appear as the last opened chat.
+             self._update_last_interaction(thread)
+             self._save_thread_to_disk(thread)
+             return display_history
 
      async def append_message(
          self,
mito_ai/completions/models.py CHANGED
@@ -29,7 +29,7 @@ class CellUpdate(BaseModel):
  # for now and rely on the AI to respond with the correct types, following the format
  # that we show it in the system prompt.
  class AgentResponse(BaseModel):
-     type: Literal['cell_update', 'get_cell_output', 'finished_task']
+     type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task']
      message: str
      cell_update: Optional[CellUpdate]
      get_cell_output_cell_id: Optional[str]
@@ -83,6 +83,7 @@ class ChatMessageMetadata():
      variables: Optional[List[str]] = None
      files: Optional[List[str]] = None
      base64EncodedActiveCellOutput: Optional[str] = None
+     base64EncodedUploadedImage: Optional[str] = None
      index: Optional[int] = None
      stream: bool = False
      additionalContext: Optional[List[Dict[str, str]]] = None
@@ -96,6 +97,7 @@ class AgentExecutionMetadata():
      aiOptimizedCells: List[AIOptimizedCell]
      isChromeBrowser: bool
      base64EncodedActiveCellOutput: Optional[str] = None
+     base64EncodedUploadedImage: Optional[str] = None
      variables: Optional[List[str]] = None
      files: Optional[List[str]] = None
      index: Optional[int] = None
mito_ai/completions/prompt_builders/agent_execution_prompt.py CHANGED
@@ -13,12 +13,14 @@ from mito_ai.completions.prompt_builders.utils import (
      get_selected_context_str,
  )
 
+
  def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
      variables_str = '\n'.join([f"{variable}" for variable in md.variables or []])
      files_str = '\n'.join([f"{file}" for file in md.files or []])
      ai_optimized_cells_str = '\n'.join([f"{cell}" for cell in md.aiOptimizedCells or []])
      rules_str = get_rules_str(md.additionalContext)
      selected_context_str = get_selected_context_str(md.additionalContext)
+
      context_str = f"""Remember to choose the correct tool to respond with.
 
  {rules_str}
mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py CHANGED
@@ -54,6 +54,14 @@ ERROR CORRECTION:
  - Reuse as much of the existing code as possible.
  - DO NOT ADD TEMPORARY COMMENTS like '# Fixed the typo here' or '# Added this line to fix the error'
  - If you encounter a ModuleNotFoundError, you can install the package by adding the the following line to the top of the code cell: `!pip install <package_name> --quiet`.
+ - If you encounter a NameError, you can use the RUN_ALL_CELLS tool to run all cells from the top of the notebook to the bottom to bring the variable into scope.
+ RUN_ALL_CELLS:
+ When you want to execute all cells in the notebook from top to bottom, respond with this format:
+ {{
+     type: 'run_all_cells',
+     message: str
+ }}
+ Note that if the name error persists even after using run_all_cells, it means that the variable is not defined in the notebook and you should not reuse this tool. Additionally, this tool could also be used to refresh the notebook state.
 
  <Example>
 
mito_ai/completions/prompt_builders/agent_system_message.py CHANGED
@@ -206,6 +206,23 @@ Important information:
  ===='''
  }
 
+ TOOL: RUN_ALL_CELLS
+
+ When you want to execute all cells in the notebook from top to bottom, respond with this format:
+
+ {{
+     type: 'run_all_cells',
+     message: str
+ }}
+
+ Important information:
+ 1. Use this tool when you encounter a NameError. For example, if you get an error like "NameError: name 'prompts_df' is not defined", you should use this tool to run all cells from the top of the notebook to the bottom to bring the variable into scope.
+ 2. Note that if the name error persists even after using run_all_cells, it means that the variable is not defined in the notebook and you should not reuse this tool.
+ 3. Additionally, this tool could also be used to refresh the notebook state.
+ 4. If running all cells results in an error, the system will automatically handle the error through the normal error fixing process.
+ 5. Do not use this tool repeatedly if it continues to produce errors - instead, focus on fixing the specific error that occurred.
+ ====
+
  TOOL: FINISHED_TASK
 
  When you have completed the user's task, respond with a message in this format:
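
A plausible agent reply that selects the new RUN_ALL_CELLS tool after a NameError, following the format described above (the message text is illustrative):

    {
        "type": "run_all_cells",
        "message": "prompts_df is defined in an earlier cell, so I'm re-running the notebook from top to bottom to bring it into scope."
    }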
mito_ai/completions/prompt_builders/utils.py CHANGED
@@ -38,6 +38,7 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
      selected_variables = [context["value"] for context in additional_context if context.get("type") == "variable"]
      selected_files = [context["value"] for context in additional_context if context.get("type") == "file"]
      selected_db_connections = [context["value"] for context in additional_context if context.get("type") == "db"]
+     selected_images = [context["value"] for context in additional_context if context.get("type") == "img"]
 
      # STEP 2: Create a list of strings (instructions) for each context type
      context_parts = []
@@ -60,6 +61,12 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
          + "\n".join(selected_db_connections)
      )
 
+     if len(selected_images) > 0:
+         context_parts.append(
+             "The following images have been selected by the user to be used in the task:\n"
+             + "\n".join(selected_images)
+         )
+
      # STEP 3: Combine into a single string
 
      return "\n\n".join(context_parts)
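
A hedged sketch of the new branch (the context entries are illustrative, and the wording of the other sections is whatever the existing function already produces):

    additional_context = [
        {"type": "variable", "value": "sales_df"},
        {"type": "img", "value": "uploaded_chart.png"},
    ]
    print(get_selected_context_str(additional_context))
    # ...existing variable section...
    #
    # The following images have been selected by the user to be used in the task:
    # uploaded_chart.png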
mito_ai/constants.py CHANGED
@@ -24,8 +24,9 @@ AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
  AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL")
 
  # Mito AI Base URLs and Endpoint Paths
- MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v1"
- MITO_DEV_BASE_URL = "https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/v1"
+ MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v2"
+ MITO_DEV_BASE_URL = "https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/v2"
+ MITO_LOCAL_BASE_URL = "http://127.0.0.1:8000/v2" # When you are running the mito completion server locally
 
  # Set ACTIVE_BASE_URL manually
  ACTIVE_BASE_URL = MITO_PROD_BASE_URL # Change to MITO_DEV_BASE_URL for dev
mito_ai/file_uploads/__init__.py ADDED
@@ -0,0 +1,3 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+