mito-ai 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +10 -1
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +92 -8
- mito_ai/app_deploy/app_deploy_utils.py +25 -0
- mito_ai/app_deploy/handlers.py +9 -12
- mito_ai/app_deploy/models.py +4 -1
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/handlers.py +44 -20
- mito_ai/completions/models.py +1 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +22 -4
- mito_ai/constants.py +3 -0
- mito_ai/streamlit_conversion/agent_utils.py +148 -30
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +147 -24
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +2 -1
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +2 -2
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +4 -3
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +101 -104
- mito_ai/streamlit_conversion/streamlit_system_prompt.py +1 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +18 -17
- mito_ai/streamlit_conversion/validate_streamlit_app.py +66 -62
- mito_ai/streamlit_preview/handlers.py +5 -3
- mito_ai/streamlit_preview/utils.py +11 -7
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +71 -0
- mito_ai/tests/message_history/test_message_history_utils.py +43 -19
- mito_ai/tests/providers/test_anthropic_client.py +180 -8
- mito_ai/tests/streamlit_conversion/test_apply_patch_to_text.py +368 -0
- mito_ai/tests/streamlit_conversion/test_fix_diff_headers.py +533 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +71 -158
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +16 -16
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +16 -28
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +2 -2
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/test_anthropic_utils.py +4 -4
- mito_ai/user/handlers.py +33 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/anthropic_utils.py +15 -21
- mito_ai/utils/message_history_utils.py +4 -3
- mito_ai/utils/telemetry_utils.py +7 -4
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +100 -100
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.cf2e3ad2797fbb53826b.js → mito_ai-0.1.46.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.20f12766ecd3d430568e.js +1520 -300
- mito_ai-0.1.46.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.20f12766ecd3d430568e.js.map +1 -0
- mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.5482493d1270f55b7283.js → mito_ai-0.1.46.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.54126ab6511271265443.js +18 -18
- mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.5482493d1270f55b7283.js.map → mito_ai-0.1.46.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.54126ab6511271265443.js.map +1 -1
- {mito_ai-0.1.44.dist-info → mito_ai-0.1.46.dist-info}/METADATA +2 -2
- {mito_ai-0.1.44.dist-info → mito_ai-0.1.46.dist-info}/RECORD +75 -63
- mito_ai-0.1.44.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.cf2e3ad2797fbb53826b.js.map +0 -1
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.44.data → mito_ai-0.1.46.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.44.dist-info → mito_ai-0.1.46.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.44.dist-info → mito_ai-0.1.46.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.44.dist-info → mito_ai-0.1.46.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py
CHANGED
@@ -5,6 +5,7 @@ from typing import List, Dict
 from jupyter_server.utils import url_path_join
 from mito_ai.completions.handlers import CompletionHandler
 from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.app_deploy.handlers import AppDeployHandler
 from mito_ai.streamlit_preview.handlers import StreamlitPreviewHandler
 from mito_ai.log.urls import get_log_urls
@@ -16,6 +17,8 @@ from mito_ai.auth.urls import get_auth_urls
 from mito_ai.streamlit_preview.urls import get_streamlit_preview_urls
 from mito_ai.app_manager.handlers import AppManagerHandler
 from mito_ai.file_uploads.urls import get_file_uploads_urls
+from mito_ai.user.urls import get_user_urls
+from mito_ai.chat_history.urls import get_chat_history_urls
 
 # Force Matplotlib to use the Jupyter inline backend.
 # Background: importing Streamlit sets os.environ["MPLBACKEND"] = "Agg" very early.
@@ -62,13 +65,17 @@ def _load_jupyter_server_extension(server_app) -> None:  # type: ignore
     base_url = web_app.settings["base_url"]
 
     open_ai_provider = OpenAIProvider(config=server_app.config)
+
+    # Create a single GlobalMessageHistory instance for the entire server
+    # This ensures thread-safe access to the .mito/ai-chats directory
+    global_message_history = GlobalMessageHistory()
 
     # WebSocket handlers
     handlers = [
         (
             url_path_join(base_url, "mito-ai", "completions"),
             CompletionHandler,
-            {"llm": open_ai_provider},
+            {"llm": open_ai_provider, "message_history": global_message_history},
         ),
         (
             url_path_join(base_url, "mito-ai", "app-deploy"),
@@ -100,6 +107,8 @@ def _load_jupyter_server_extension(server_app) -> None:  # type: ignore
     handlers.extend(get_auth_urls(base_url))  # type: ignore
     handlers.extend(get_streamlit_preview_urls(base_url))  # type: ignore
     handlers.extend(get_file_uploads_urls(base_url))  # type: ignore
+    handlers.extend(get_user_urls(base_url))  # type: ignore
+    handlers.extend(get_chat_history_urls(base_url, global_message_history))  # type: ignore
 
     web_app.add_handlers(host_pattern, handlers)
     server_app.log.info("Loaded the mito_ai server extension")
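The refactor above uses Tornado's standard dependency-injection mechanism: the third element of a handler tuple is a kwargs dict that Tornado passes to the handler's initialize(). A minimal sketch of that pattern, using stand-in names rather than mito_ai's real classes:

```
# Minimal sketch of the shared-state pattern the diff adopts; SharedState and
# ExampleHandler are illustrative stand-ins, not mito_ai classes.
import tornado.web

class SharedState:
    """Plays the role of GlobalMessageHistory: built once, shared by all handlers."""
    def __init__(self) -> None:
        self.threads: dict = {}

class ExampleHandler(tornado.web.RequestHandler):
    def initialize(self, message_history: SharedState) -> None:
        # Tornado calls initialize() with the kwargs dict registered below,
        # so every request sees the same SharedState instance.
        self._message_history = message_history

shared = SharedState()
app = tornado.web.Application([
    (r"/mito-ai/example", ExampleHandler, {"message_history": shared}),
])
```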
mito_ai/_version.py
CHANGED
mito_ai/anthropic_client.py
CHANGED
@@ -5,9 +5,9 @@ import json
 import anthropic
 from typing import Dict, Any, Optional, Tuple, Union, Callable, List, cast
 
-from anthropic.types import Message, MessageParam
-from mito_ai.completions.models import
-from mito_ai.
+from anthropic.types import Message, MessageParam, TextBlockParam
+from mito_ai.completions.models import ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem, MessageType
+from mito_ai.constants import MESSAGE_HISTORY_TRIM_THRESHOLD
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params
 
@@ -52,12 +52,12 @@ def extract_and_parse_anthropic_json_response(response: Message) -> Union[object
 
 
 def get_anthropic_system_prompt_and_messages(messages: List[ChatCompletionMessageParam]) -> Tuple[
-    Union[str, anthropic.
+    Union[str, anthropic.Omit], List[MessageParam]]:
     """
     Convert a list of OpenAI messages to a list of Anthropic messages.
     """
 
-    system_prompt: Union[str, anthropic.
+    system_prompt: Union[str, anthropic.Omit] = anthropic.Omit()
     anthropic_messages: List[MessageParam] = []
 
     for message in messages:
@@ -125,6 +125,90 @@ def get_anthropic_system_prompt_and_messages(messages: List[ChatCompletionMessag
     return system_prompt, anthropic_messages
 
 
+def add_cache_control_to_message(message: MessageParam) -> MessageParam:
+    """
+    Adds cache_control to a message's content.
+    Handles both string content and list of content blocks.
+    """
+    content = message.get("content")
+
+    if isinstance(content, str):
+        # Simple string content - convert to list format with cache_control
+        return {
+            "role": message["role"],
+            "content": [
+                {
+                    "type": "text",
+                    "text": content,
+                    "cache_control": {"type": "ephemeral"}
+                }
+            ]
+        }
+
+    elif isinstance(content, list) and len(content) > 0:
+        # List of content blocks - add cache_control to last block
+        content_blocks = content.copy()
+        last_block = content_blocks[-1].copy()
+        last_block["cache_control"] = {"type": "ephemeral"}
+        content_blocks[-1] = last_block
+
+        return {
+            "role": message["role"],
+            "content": content_blocks
+        }
+
+    else:
+        # Edge case: empty or malformed content
+        return message
+
+
+def get_anthropic_system_prompt_and_messages_with_caching(messages: List[ChatCompletionMessageParam]) -> Tuple[
+    Union[str, List[TextBlockParam], anthropic.Omit], List[MessageParam]]:
+    """
+    Convert a list of OpenAI messages to a list of Anthropic messages with caching applied.
+
+    Caching Strategy:
+    1. System prompt (static) → Always cached
+    2. Stable conversation history → Cache at keep_recent boundary
+    3. Recent messages → Never cached (always fresh)
+
+    The keep_recent parameter determines which messages are stable and won't be trimmed.
+    We cache at the keep_recent boundary because those messages are guaranteed to be stable.
+    """
+
+    # Get the base system prompt and messages
+    system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+
+    # 1. Cache the system prompt always
+    # If the system prompt is something like anthropic.Omit, we don't need to cache it
+    cached_system_prompt: Union[str, List[TextBlockParam], anthropic.Omit] = system_prompt
+    if isinstance(system_prompt, str):
+        cached_system_prompt = [{
+            "type": "text",
+            "text": system_prompt,
+            "cache_control": {"type": "ephemeral"}
+        }]
+
+    # 2. Cache conversation history at the boundary where the messages are stable.
+    # Messages are stable after they are more than MESSAGE_HISTORY_TRIM_THRESHOLD old.
+    # At this point, the messages are not edited anymore, so they will not invalidate the cache.
+    # If we included the messages before the boundary in the cache, then every time we send a new
+    # message, we would invalidate the cache and we would never get a cache hit except for the system prompt.
+    messages_with_cache = []
+
+    if len(anthropic_messages) > 0:
+        cache_boundary = len(anthropic_messages) - MESSAGE_HISTORY_TRIM_THRESHOLD - 1
+
+        # Add all messages, but only add cache_control to the message at the boundary
+        for i, msg in enumerate(anthropic_messages):
+            if i == cache_boundary:
+                messages_with_cache.append(add_cache_control_to_message(msg))
+            else:
+                messages_with_cache.append(msg)
+
+    return cached_system_prompt, messages_with_cache
+
+
 class AnthropicClient:
     """
     A client for interacting with the Anthropic API or the Mito server fallback.
@@ -149,7 +233,7 @@ class AnthropicClient:
         """
         Get a response from Claude or the Mito server that adheres to the AgentResponse format.
         """
-        anthropic_system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+        anthropic_system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
 
         provider_data = get_anthropic_completion_function_params(
             message_type=message_type,
@@ -166,6 +250,7 @@ class AnthropicClient:
         # Unpack provider_data for direct API call
         assert self.client is not None
         response = self.client.messages.create(**provider_data)
+
         if provider_data.get("tool_choice") is not None:
             result = extract_and_parse_anthropic_json_response(response)
             return json.dumps(result) if not isinstance(result, str) else result
@@ -192,7 +277,7 @@ class AnthropicClient:
     async def stream_completions(self, messages: List[ChatCompletionMessageParam], model: str, message_id: str, message_type: MessageType,
                                  reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]) -> str:
         try:
-            anthropic_system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages(messages)
+            anthropic_system_prompt, anthropic_messages = get_anthropic_system_prompt_and_messages_with_caching(messages)
             accumulated_response = ""
 
             if self.api_key:
@@ -206,7 +291,6 @@ class AnthropicClient:
                 stream=True
             )
 
-
             for chunk in stream:
                 if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
                     content = chunk.delta.text
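To make the cache-boundary arithmetic concrete, here is a small self-contained sketch (not package code) that mimics the placement logic above; the trim threshold of 3 is an assumed value for illustration:

```
# Illustrative only: shows which message receives cache_control for a given
# history length, assuming MESSAGE_HISTORY_TRIM_THRESHOLD = 3.
MESSAGE_HISTORY_TRIM_THRESHOLD = 3  # assumed value for this sketch

messages = [f"message {i}" for i in range(6)]

# cache_boundary = 6 - 3 - 1 = 2, so "message 2" is the last cached message;
# messages 3-5 stay fresh and can be trimmed or appended to without
# invalidating the cached prefix (system prompt + messages 0-2).
cache_boundary = len(messages) - MESSAGE_HISTORY_TRIM_THRESHOLD - 1
for i, text in enumerate(messages):
    marker = "  <- cache_control here" if i == cache_boundary else ""
    print(i, text, marker)
```

Anthropic's prompt caching matches on the exact prefix up to each cache_control marker, which is why a marker that moved on every request would never produce a cache hit beyond the system prompt.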
mito_ai/app_deploy/app_deploy_utils.py
ADDED
@@ -0,0 +1,25 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import os
+import zipfile
+import logging
+from typing import List, Optional
+
+def add_files_to_zip(zip_path: str, base_path: str, files_to_add: List[str], logger: Optional[logging.Logger] = None) -> None:
+    """Create a zip file at zip_path and add the selected files/folders."""
+    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
+        for rel_path in files_to_add:
+            abs_path = os.path.join(base_path, rel_path)
+
+            if os.path.isfile(abs_path):
+                zipf.write(abs_path, arcname=rel_path)
+            elif os.path.isdir(abs_path):
+                for root, _, files in os.walk(abs_path):
+                    for file in files:
+                        file_abs = os.path.join(root, file)
+                        arcname = os.path.relpath(file_abs, base_path)
+                        zipf.write(file_abs, arcname=arcname)
+            else:
+                if logger:
+                    logger.warning(f"Skipping missing file: {abs_path}")
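A quick usage sketch for the new helper, exercising all three branches (file, directory, missing path); the import path follows the new module shown above:

```
import logging
import os
import tempfile
import zipfile

from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip

logging.basicConfig(level=logging.WARNING)

with tempfile.TemporaryDirectory() as base:
    os.makedirs(os.path.join(base, "data"))
    with open(os.path.join(base, "app.py"), "w") as f:
        f.write("print('hi')\n")
    with open(os.path.join(base, "data", "sales.csv"), "w") as f:
        f.write("a,b\n1,2\n")

    zip_path = os.path.join(base, "bundle.zip")
    # "missing.txt" does not exist, so it is skipped with a logged warning
    add_files_to_zip(zip_path, base, ["app.py", "data", "missing.txt"],
                     logging.getLogger(__name__))

    with zipfile.ZipFile(zip_path) as zf:
        print(sorted(zf.namelist()))  # ['app.py', 'data/sales.csv']
```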
mito_ai/app_deploy/handlers.py
CHANGED
@@ -4,13 +4,13 @@
 import os
 import time
 import logging
-from typing import Any, Union,
-import zipfile
+from typing import Any, Union, List
 import tempfile
 from mito_ai.streamlit_conversion.streamlit_utils import get_app_path
 from mito_ai.utils.create import initialize_user
 from mito_ai.utils.version_utils import is_pro
 from mito_ai.utils.websocket_base import BaseWebSocketHandler
+from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip
 from mito_ai.app_deploy.models import (
     DeployAppReply,
     AppDeployError,
@@ -18,7 +18,6 @@ from mito_ai.app_deploy.models import (
     ErrorMessage,
     MessageType
 )
-from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
 from mito_ai.logger import get_logger
 from mito_ai.constants import ACTIVE_STREAMLIT_BASE_URL
 import requests
@@ -111,6 +110,7 @@ class AppDeployHandler(BaseWebSocketHandler):
         message_id = message.message_id
         notebook_path = message.notebook_path
         jwt_token = message.jwt_token
+        files_to_upload = message.selected_files
 
         if not message_id:
             self.log.error("Missing message_id in request")
@@ -168,7 +168,7 @@ class AppDeployHandler(BaseWebSocketHandler):
         ))
 
         # Finally, deploy the app
-        deploy_url = await self._deploy_app(app_directory, jwt_token)
+        deploy_url = await self._deploy_app(app_directory, files_to_upload, jwt_token)
 
         # Send the response
         self.reply(DeployAppReply(
@@ -219,11 +219,12 @@ class AppDeployHandler(BaseWebSocketHandler):
             return False
 
 
-    async def _deploy_app(self, app_path: str, jwt_token: str = '') -> str:
+    async def _deploy_app(self, app_path: str, files_to_upload:List[str], jwt_token: str = '') -> str:
         """Deploy the app using pre-signed URLs.
 
         Args:
             app_path: Path to the app file.
+            files_to_upload: Files the user selected to upload for the app to run
             jwt_token: JWT token for authentication (optional)
 
         Returns:
@@ -258,16 +259,12 @@ class AppDeployHandler(BaseWebSocketHandler):
         # Step 2: Create a zip file of the app.
         temp_zip_path = None
         try:
-            # Create temp file
-            with tempfile.NamedTemporaryFile(suffix=
+            # Create temp file
+            with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_zip:
                 temp_zip_path = temp_zip.name
 
             self.log.info("Zipping application files...")
-
-            for root, _, files in os.walk(app_path):
-                for file in files:
-                    file_path = os.path.join(root, file)
-                    zipf.write(file_path, arcname=os.path.relpath(file_path, app_path))
+            add_files_to_zip(temp_zip_path, app_path, files_to_upload, self.log)
 
             upload_response = await self._upload_app_to_s3(temp_zip_path, presigned_url)
         except Exception as e:
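With the helper in place, the handler's zip step reduces to the sketch below (paths and file selection are hypothetical): create a named temp file with delete=False so it survives its with block, delegate to add_files_to_zip, and clean up after the upload:

```
import logging
import os
import tempfile

from mito_ai.app_deploy.app_deploy_utils import add_files_to_zip

app_path = "/tmp/my_app"              # hypothetical app directory
files_to_upload = ["app.py", "data"]  # hypothetical user selection

temp_zip_path = None
try:
    # delete=False keeps the file on disk after the context manager closes it,
    # so add_files_to_zip can reopen it by path.
    with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_zip:
        temp_zip_path = temp_zip.name
    add_files_to_zip(temp_zip_path, app_path, files_to_upload,
                     logging.getLogger(__name__))
    # ...the real handler then uploads temp_zip_path to a pre-signed URL...
finally:
    if temp_zip_path and os.path.exists(temp_zip_path):
        os.remove(temp_zip_path)
```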
mito_ai/app_deploy/models.py
CHANGED
@@ -3,7 +3,7 @@
 
 from dataclasses import dataclass
 from enum import Enum
-from typing import Literal, Optional
+from typing import Literal, Optional, List
 
 
 class MessageType(str, Enum):
@@ -66,6 +66,9 @@ class DeployAppRequest:
 
     # Path to the app file.
     notebook_path: str
+
+    # Files to be uploaded for the app to run
+    selected_files: List[str]
 
     # JWT token for authorization.
     jwt_token: Optional[str] = None
mito_ai/chat_history/handlers.py
ADDED
@@ -0,0 +1,63 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import tornado
+from typing import List, Any
+from jupyter_server.base.handlers import APIHandler
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.completions.models import ChatThreadMetadata
+
+
+class ChatHistoryHandler(APIHandler):
+    """
+    Endpoints for working with chat history threads.
+    """
+
+    def initialize(self, message_history: GlobalMessageHistory) -> None:
+        """Initialize the handler with the global message history instance."""
+        super().initialize()
+        self._message_history = message_history
+
+    @tornado.web.authenticated
+    def get(self, *args: Any, **kwargs: Any) -> None:
+        """Get all chat history threads or a specific thread by ID."""
+        try:
+            # Check if a specific thread ID is provided in the URL
+            thread_id = kwargs.get("thread_id")
+
+            if thread_id:
+                # Get specific thread
+                if thread_id in self._message_history._chat_threads:
+                    thread = self._message_history._chat_threads[thread_id]
+                    thread_data = {
+                        "thread_id": thread.thread_id,
+                        "name": thread.name,
+                        "creation_ts": thread.creation_ts,
+                        "last_interaction_ts": thread.last_interaction_ts,
+                        "display_history": thread.display_history,
+                        "ai_optimized_history": thread.ai_optimized_history,
+                    }
+                    self.finish(thread_data)
+                else:
+                    self.set_status(404)
+                    self.finish({"error": f"Thread with ID {thread_id} not found"})
+            else:
+                # Get all threads
+                threads: List[ChatThreadMetadata] = self._message_history.get_threads()
+
+                # Convert to dict format for JSON serialization
+                threads_data = [
+                    {
+                        "thread_id": thread.thread_id,
+                        "name": thread.name,
+                        "creation_ts": thread.creation_ts,
+                        "last_interaction_ts": thread.last_interaction_ts,
+                    }
+                    for thread in threads
+                ]
+
+                self.finish({"threads": threads_data})
+
+        except Exception as e:
+            self.set_status(500)
+            self.finish({"error": str(e)})
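A hedged sketch of calling the new endpoint from Python: the URL prefix and response shapes follow the handler above and the urls module below, while the server address and token header are assumptions about a local Jupyter setup:

```
import requests

BASE = "http://localhost:8888/mito-ai/chat-history"        # assumed local server
HEADERS = {"Authorization": "token <your-jupyter-token>"}  # hypothetical token

# List thread metadata: {"threads": [{"thread_id": ..., "name": ..., ...}]}
threads = requests.get(f"{BASE}/threads", headers=HEADERS).json()["threads"]

# Fetch one thread; the detail view adds display_history and ai_optimized_history
if threads:
    thread_id = threads[0]["thread_id"]
    detail = requests.get(f"{BASE}/threads/{thread_id}", headers=HEADERS).json()
    print(detail["name"], len(detail["display_history"]))
```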
mito_ai/chat_history/urls.py
ADDED
@@ -0,0 +1,32 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Tuple, Any
+from jupyter_server.utils import url_path_join
+from mito_ai.chat_history.handlers import ChatHistoryHandler
+from mito_ai.completions.message_history import GlobalMessageHistory
+
+
+def get_chat_history_urls(base_url: str, message_history: GlobalMessageHistory) -> List[Tuple[str, Any, dict]]:
+    """Get all chat history related URL patterns.
+
+    Args:
+        base_url: The base URL for the Jupyter server
+        message_history: The global message history instance
+
+    Returns:
+        List of (url_pattern, handler_class, handler_kwargs) tuples
+    """
+    BASE_URL = base_url + "/mito-ai/chat-history"
+    return [
+        (
+            url_path_join(BASE_URL, "threads"),
+            ChatHistoryHandler,
+            {"message_history": message_history},
+        ),
+        (
+            url_path_join(BASE_URL, "threads", "(?P<thread_id>[^/]+)"),
+            ChatHistoryHandler,
+            {"message_history": message_history},
+        ),
+    ]
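The second pattern's named group is what makes kwargs.get("thread_id") work in the handler: Tornado forwards named regex groups from the URL as keyword arguments to the handler method. A minimal stand-alone illustration (not package code):

```
import tornado.web

class EchoThreadHandler(tornado.web.RequestHandler):
    def get(self, *args, **kwargs):
        # For GET /threads/abc123 the route below yields kwargs == {"thread_id": "abc123"}
        self.write({"thread_id": kwargs.get("thread_id")})

app = tornado.web.Application([
    (r"/threads/(?P<thread_id>[^/]+)", EchoThreadHandler),
])
```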
mito_ai/completions/handlers.py
CHANGED
@@ -14,6 +14,7 @@ import tornado.web
 from jupyter_core.utils import ensure_async
 from jupyter_server.base.handlers import JupyterHandler
 from tornado.websocket import WebSocketHandler
+from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.logger import get_logger
 from mito_ai.completions.models import (
@@ -48,11 +49,8 @@ from mito_ai.utils.telemetry_utils import identify
 
 FALLBACK_MODEL = "gpt-4.1"  # Default model to use for safety
 
-# The GlobalMessageHistory is
-#
-# there is one manager of the locks for the .mito/ai-chats directory. This is my current understanding and it
-# might be incorrect!
-message_history = GlobalMessageHistory()
+# The GlobalMessageHistory is now created in __init__.py and passed to handlers
+# to ensure there's only one instance managing the .mito/ai-chats directory locks
 
 # This handler is responsible for the mito_ai/completions endpoint.
 # It takes a message from the user, sends it to the OpenAI API, and returns the response.
@@ -61,10 +59,11 @@ message_history = GlobalMessageHistory()
 class CompletionHandler(JupyterHandler, WebSocketHandler):
     """Completion websocket handler."""
 
-    def initialize(self, llm: OpenAIProvider) -> None:
+    def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
         super().initialize()
         self.log.debug("Initializing websocket connection %s", self.request.path)
         self._llm = llm
+        self._message_history = message_history
         self.is_pro = is_pro()
         self._selected_model = FALLBACK_MODEL
         self.is_electron = False
@@ -149,7 +148,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # Clear history if the type is "start_new_chat"
         if type == MessageType.START_NEW_CHAT:
-            thread_id = message_history.create_new_thread()
+            thread_id = self._message_history.create_new_thread()
 
             reply = StartNewChatReply(
                 parent_id=parsed_message.get("message_id"),
@@ -160,7 +159,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # Handle get_threads: return list of chat threads
         if type == MessageType.GET_THREADS:
-            threads = message_history.get_threads()
+            threads = self._message_history.get_threads()
             reply = FetchThreadsReply(
                 parent_id=parsed_message.get("message_id"),
                 threads=threads
@@ -172,7 +171,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
         if type == MessageType.DELETE_THREAD:
             thread_id_to_delete = metadata_dict.get('thread_id')
             if thread_id_to_delete:
-                is_thread_deleted = message_history.delete_thread(thread_id_to_delete)
+                is_thread_deleted = self._message_history.delete_thread(thread_id_to_delete)
                 reply = DeleteThreadReply(
                     parent_id=parsed_message.get("message_id"),
                     success=is_thread_deleted
@@ -188,7 +187,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # If a thread_id is provided, use that thread's history; otherwise, use newest.
         thread_id = metadata_dict.get('thread_id')
-        display_history = message_history.get_display_history(thread_id)
+        display_history = self._message_history.get_display_history(thread_id)
 
         reply = FetchHistoryReply(
             parent_id=parsed_message.get('message_id'),
@@ -222,7 +221,32 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
             )
             self.reply(reply)
             return
-
+
+        if type == MessageType.STOP_AGENT:
+            thread_id_to_stop = metadata_dict.get('threadId')
+            if thread_id_to_stop:
+                self.log.info(f"Stopping agent, thread ID: {thread_id_to_stop}")
+
+                ai_optimized_message: ChatCompletionMessageParam = {
+                    "role": "assistant",
+                    "content": "The user made the following request: Stop processing my last request. I want to change it. Please answer my future requests without going back and finising my previous request."
+                }
+                display_optimized_message: ChatCompletionMessageParam = {
+                    "role": "assistant",
+                    "content": "Agent interupted by user "
+                }
+
+                await self._message_history.append_message(
+                    ai_optimized_message=ai_optimized_message,
+                    display_message=display_optimized_message,
+                    model=self._selected_model,
+                    llm_provider=self._llm,
+                    thread_id=thread_id_to_stop
+                )
+            else:
+                self.log.info("Trying to stop agent, but no thread ID available")
+            return
+
         try:
             # Get completion based on message type
             completion = None
@@ -240,7 +264,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     await stream_chat_completion(
                         chat_metadata,
                         self._llm,
-                        message_history,
+                        self._message_history,
                         message_id,
                         self.reply,
                         model
@@ -248,7 +272,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     return
                 else:
                     # Regular non-streaming completion
-                    completion = await get_chat_completion(chat_metadata, self._llm, message_history, model)
+                    completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
             elif type == MessageType.SMART_DEBUG:
                 smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
                 # Handle streaming if requested and available
@@ -257,7 +281,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     await stream_smart_debug_completion(
                         smart_debug_metadata,
                         self._llm,
-                        message_history,
+                        self._message_history,
                         message_id,
                         self.reply,
                         model
@@ -265,7 +289,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     return
                 else:
                     # Regular non-streaming completion
-                    completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, message_history, model)
+                    completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
             elif type == MessageType.CODE_EXPLAIN:
                 code_explain_metadata = CodeExplainMetadata(**metadata_dict)
 
@@ -275,7 +299,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     await stream_code_explain_completion(
                         code_explain_metadata,
                         self._llm,
-                        message_history,
+                        self._message_history,
                         message_id,
                         self.reply,
                         model
@@ -283,16 +307,16 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                     return
                 else:
                     # Regular non-streaming completion
-                    completion = await get_code_explain_completion(code_explain_metadata, self._llm, message_history, model)
+                    completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
             elif type == MessageType.AGENT_EXECUTION:
                 agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
-                completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, message_history, model)
+                completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
             elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
                 agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
-                completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, message_history, model)
+                completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
             elif type == MessageType.INLINE_COMPLETION:
                 inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
-                completion = await get_inline_completion(inline_completer_metadata, self._llm, message_history, model)
+                completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
             else:
                 raise ValueError(f"Invalid message type: {type}")
 
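For reference, the new STOP_AGENT branch expects a websocket frame shaped roughly like the sketch below. The field names (type, message_id, and a metadata dict with threadId) come from what the handler reads; the literal "stop_agent" enum value and the client call are assumptions, since MessageType's values are not shown in this diff:

```
import json

stop_message = {
    "type": "stop_agent",                    # assumed MessageType.STOP_AGENT value
    "message_id": "msg-123",                 # hypothetical id
    "metadata": {"threadId": "thread-abc"},  # note the camelCase threadId key
}

# With an open websocket to the mito-ai/completions endpoint:
# ws.write_message(json.dumps(stop_message))
print(json.dumps(stop_message))
```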
mito_ai/completions/models.py
CHANGED
mito_ai/completions/prompt_builders/prompt_constants.py
CHANGED
@@ -125,15 +125,33 @@ If the user has requested data that you believe is stored in the database:
     connections[connection_name]["username"]
     ```
 
+- The user may colloquially ask for a "list of x", always assume they want a pandas DataFrame.
+- When working with dataframes created from an SQL query, ALWAYS use lowercase column names.
+- If you think the requested data is stored in the database, but you are unsure, then ask the user for clarification.
+
+## Additional MSSQL Rules
+
+- When connecting to a Microsoft SQL Server (MSSQL) database, use the following format:
+
+    ```
+    import urllib.parse
+
+    encoded_password = urllib.parse.quote_plus(password)
+    conn_str = f"mssql+pyodbc://username:encoded_password@host:port/database?driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
+    ```
+
+- Always URL-encode passwords for MSSQL connections to handle special characters properly.
+- Include the port number in MSSQL connection strings.
+- Use "ODBC+Driver+18+for+SQL+Server" (with plus signs) in the driver parameter.
+- Always include "TrustServerCertificate=yes" for MSSQL connections to avoid SSL certificate issues.
+
+## Additional Oracle Rules
+
 - When connecting to an Oracle database, use the following format:
     ```
     conn_str = f"oracle+oracledb://username:password@host:port?service_name=service_name"
     ```
 
-- The user may colloquially ask for a "list of x", always assume they want a pandas DataFrame.
-- When working with dataframes created from an SQL query, ALWAYS use lowercase column names.
-- If you think the requested data is stored in the database, but you are unsure, then ask the user for clarification.
-
 Here is the schema:
 {schemas}
 """
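As written inside the prompt, the MSSQL snippet's f-string contains no placeholders, so nothing actually interpolates; the runnable form of the rule looks like this sketch, where the credentials and host are placeholders:

```
import urllib.parse

username, password = "mito_user", "p@ss/word!"  # hypothetical credentials
host, port, database = "sql.example.com", 1433, "analytics"

# URL-encode the password so special characters survive the connection string
encoded_password = urllib.parse.quote_plus(password)
conn_str = (
    f"mssql+pyodbc://{username}:{encoded_password}@{host}:{port}/{database}"
    "?driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
)
print(conn_str)
# mssql+pyodbc://mito_user:p%40ss%2Fword%21@sql.example.com:1433/analytics?driver=...
```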