mito-ai 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
mito_ai/completions/handlers.py (new file, +415 lines — the only file in the listing above with a +415 count):

```diff
@@ -0,0 +1,415 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import json
+import logging
+import time
+import uuid
+from dataclasses import asdict
+from http import HTTPStatus
+from typing import Any, Dict, Optional, Union
+import tornado
+import tornado.ioloop
+import tornado.web
+from jupyter_core.utils import ensure_async
+from jupyter_server.base.handlers import JupyterHandler
+from tornado.websocket import WebSocketHandler
+from openai.types.chat import ChatCompletionMessageParam
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.logger import get_logger
+from mito_ai.completions.models import (
+    AgentSmartDebugMetadata,
+    CompletionError,
+    CompletionItem,
+    CompletionReply,
+    CompletionRequest,
+    CompletionStreamChunk,
+    ErrorMessage,
+    FetchHistoryReply,
+    StartNewChatReply,
+    FetchThreadsReply,
+    DeleteThreadReply,
+    ChatMessageMetadata,
+    SmartDebugMetadata,
+    CodeExplainMetadata,
+    AgentExecutionMetadata,
+    InlineCompleterMetadata,
+    MessageType
+)
+from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.utils.create import initialize_user
+from mito_ai.utils.version_utils import is_pro
+from mito_ai.completions.completion_handlers.chat_completion_handler import get_chat_completion, stream_chat_completion
+from mito_ai.completions.completion_handlers.smart_debug_handler import get_smart_debug_completion, stream_smart_debug_completion
+from mito_ai.completions.completion_handlers.code_explain_handler import get_code_explain_completion, stream_code_explain_completion
+from mito_ai.completions.completion_handlers.inline_completer_handler import get_inline_completion
+from mito_ai.completions.completion_handlers.agent_execution_handler import get_agent_execution_completion
+from mito_ai.completions.completion_handlers.agent_auto_error_fixup_handler import get_agent_auto_error_fixup_completion
+from mito_ai.utils.telemetry_utils import identify
+
+FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
+
+# The GlobalMessageHistory is now created in __init__.py and passed to handlers
+# to ensure there's only one instance managing the .mito/ai-chats directory locks
+
+# This handler is responsible for the mito_ai/completions endpoint.
+# It takes a message from the user, sends it to the OpenAI API, and returns the response.
+# Important: Because this is a server extension, print statements are sent to the
+# jupyter server terminal by default (ie: the terminal you ran `jupyter lab`)
+class CompletionHandler(JupyterHandler, WebSocketHandler):
+    """Completion websocket handler."""
+
+    def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
+        super().initialize()
+        self.log.debug("Initializing websocket connection %s", self.request.path)
+        self._llm = llm
+        self._message_history = message_history
+        self.is_pro = is_pro()
+        self._selected_model = FALLBACK_MODEL
+        self.is_electron = False
+        identify(llm.key_type)
+
+    @property
+    def log(self) -> logging.Logger:
+        """Use Mito AI logger"""
+        return get_logger()
+
+    async def pre_get(self) -> None:
+        """Handles websocket authentication/authorization."""
+        # authenticate the request before opening the websocket
+        user = self.current_user
+        if user is None:
+            self.log.warning("Couldn't authenticate WebSocket connection")
+            raise tornado.web.HTTPError(HTTPStatus.UNAUTHORIZED)
+
+        # authorize the user.
+        if not await ensure_async(
+            self.authorizer.is_authorized(self, user, "execute", "mito_ai-completion")
+        ):
+            raise tornado.web.HTTPError(HTTPStatus.FORBIDDEN)
+
+    async def get(self, *args: Any, **kwargs: Dict[str, Any]) -> None:
+        """Get an event to open a socket or check service availability."""
+        # Check if this is just a service availability check
+        if self.get_query_argument('check_availability', None) == 'true':
+            self.set_status(HTTPStatus.OK)
+            self.finish()
+            return
+
+        await ensure_async(self.pre_get()) # type: ignore
+
+        initialize_user()
+
+        reply = super().get(*args, **kwargs)
+        if reply is not None:
+            await reply
+
+    def on_close(self) -> None:
+        """Invoked when the WebSocket is closed.
+
+        If the connection was closed cleanly and a status code or reason
+        phrase was supplied, these values will be available as the attributes
+        ``self.close_code`` and ``self.close_reason``.
+        """
+        # Stop observing the provider error
+        self._llm.unobserve(self._send_error, "last_error")
+
+
+    async def on_message(self, message: str) -> None: # type: ignore
+        """Handle incoming messages on the WebSocket.
+
+        Args:
+            message: The message received on the WebSocket.
+        """
+        start = time.time()
+        self.log.debug("Message received: %s", message)
+
+        try:
+            parsed_message = json.loads(message)
+            metadata_dict = parsed_message.get('metadata', {})
+            type: MessageType = MessageType(parsed_message.get('type'))
+
+            # Extract environment information from the message
+            environment = parsed_message.get('environment', {})
+            if environment:
+                is_electron = environment.get('isElectron', None)
+                if is_electron is not None:
+                    if is_electron != self.is_electron:
+                        # If the is_electron status is different, log it
+                        identify(key_type=self._llm.key_type, is_electron=is_electron)
+
+                    self.is_electron = is_electron
+
+        except ValueError as e:
+            self.log.error("Invalid completion request.", exc_info=e)
+            return
+
+        reply: Union[StartNewChatReply, FetchThreadsReply, DeleteThreadReply, FetchHistoryReply, CompletionReply]
+
+        # Clear history if the type is "start_new_chat"
+        if type == MessageType.START_NEW_CHAT:
+            thread_id = self._message_history.create_new_thread()
+
+            reply = StartNewChatReply(
+                parent_id=parsed_message.get("message_id"),
+                thread_id=thread_id
+            )
+            self.reply(reply)
+            return
+
+        # Handle get_threads: return list of chat threads
+        if type == MessageType.GET_THREADS:
+            threads = self._message_history.get_threads()
+            reply = FetchThreadsReply(
+                parent_id=parsed_message.get("message_id"),
+                threads=threads
+            )
+            self.reply(reply)
+            return
+
+        # Handle delete_thread: delete the specified thread
+        if type == MessageType.DELETE_THREAD:
+            thread_id_to_delete = metadata_dict.get('thread_id')
+            if thread_id_to_delete:
+                is_thread_deleted = self._message_history.delete_thread(thread_id_to_delete)
+                reply = DeleteThreadReply(
+                    parent_id=parsed_message.get("message_id"),
+                    success=is_thread_deleted
+                )
+            else:
+                reply = DeleteThreadReply(
+                    parent_id=parsed_message.get("message_id"),
+                    success=False
+                )
+            self.reply(reply)
+            return
+        if type == MessageType.FETCH_HISTORY:
+
+            # If a thread_id is provided, use that thread's history; otherwise, use newest.
+            thread_id = metadata_dict.get('thread_id')
+            display_history = self._message_history.get_display_history(thread_id)
+
+            reply = FetchHistoryReply(
+                parent_id=parsed_message.get('message_id'),
+                items=display_history
+            )
+            self.reply(reply)
+            return
+
+        # Updated handler for receiving model selection via websocket
+        if type == MessageType.UPDATE_MODEL_CONFIG:
+            model = metadata_dict.get('model')
+            if model:
+                self._selected_model = model
+                self.log.info(f"Model updated to: {model}")
+                reply = CompletionReply(
+                    items=[CompletionItem(content=f"Model updated to {model}", isIncomplete=False)],
+                    parent_id=parsed_message.get('message_id')
+                )
+                self.reply(reply)
+            else:
+                error = CompletionError(
+                    error_type="InvalidModelConfig",
+                    title="Invalid model configuration",
+                    traceback="",
+                    hint="Model name is required"
+                )
+                reply = CompletionReply(
+                    items=[],
+                    error=error,
+                    parent_id=parsed_message.get('message_id')
+                )
+                self.reply(reply)
+            return
+
+        if type == MessageType.STOP_AGENT:
+            thread_id_to_stop = metadata_dict.get('threadId')
+            if thread_id_to_stop:
+                self.log.info(f"Stopping agent, thread ID: {thread_id_to_stop}")
+
+                ai_optimized_message: ChatCompletionMessageParam = {
+                    "role": "assistant",
+                    "content": "The user made the following request: Stop processing my last request. I want to change it. Please answer my future requests without going back and finising my previous request."
+                }
+                display_optimized_message: ChatCompletionMessageParam = {
+                    "role": "assistant",
+                    "content": "Agent interupted by user "
+                }
+
+                await self._message_history.append_message(
+                    ai_optimized_message=ai_optimized_message,
+                    display_message=display_optimized_message,
+                    model=self._selected_model,
+                    llm_provider=self._llm,
+                    thread_id=thread_id_to_stop
+                )
+            else:
+                self.log.info("Trying to stop agent, but no thread ID available")
+            return
+
+        try:
+            # Get completion based on message type
+            completion = None
+            message_id = parsed_message.get('message_id')
+            stream = parsed_message.get('stream')
+
+            # When handling completions, always use the selected model
+            model = self._selected_model
+            if type == MessageType.CHAT:
+                chat_metadata = ChatMessageMetadata(**metadata_dict)
+
+                # Handle streaming if requested and available
+                if stream:
+                    # Use stream_chat_completion to stream the response
+                    await stream_chat_completion(
+                        chat_metadata,
+                        self._llm,
+                        self._message_history,
+                        message_id,
+                        self.reply,
+                        model
+                    )
+                    return
+                else:
+                    # Regular non-streaming completion
+                    completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
+            elif type == MessageType.SMART_DEBUG:
+                smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
+                # Handle streaming if requested and available
+                if stream:
+                    # Use stream_smart_debug_completion to stream the response
+                    await stream_smart_debug_completion(
+                        smart_debug_metadata,
+                        self._llm,
+                        self._message_history,
+                        message_id,
+                        self.reply,
+                        model
+                    )
+                    return
+                else:
+                    # Regular non-streaming completion
+                    completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
+            elif type == MessageType.CODE_EXPLAIN:
+                code_explain_metadata = CodeExplainMetadata(**metadata_dict)
+
+                # Handle streaming if requested and available
+                if stream:
+                    # Use stream_code_explain_completion to stream the response
+                    await stream_code_explain_completion(
+                        code_explain_metadata,
+                        self._llm,
+                        self._message_history,
+                        message_id,
+                        self.reply,
+                        model
+                    )
+                    return
+                else:
+                    # Regular non-streaming completion
+                    completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
+            elif type == MessageType.AGENT_EXECUTION:
+                agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
+                completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
+            elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
+                agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
+                completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
+            elif type == MessageType.INLINE_COMPLETION:
+                inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
+                completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
+            else:
+                raise ValueError(f"Invalid message type: {type}")
+
+            # Create and send reply
+            reply = CompletionReply(
+                items=[CompletionItem(content=completion, isIncomplete=False)],
+                parent_id=message_id
+            )
+            self.reply(reply)
+
+            latency_ms = round((time.time() - start) * 1000)
+            self.log.info(f"Completion handler resolved in {latency_ms} ms.")
+
+        except Exception as e:
+            error = CompletionError.from_exception(e)
+            self._send_error({"new": error})
+            reply = CompletionReply(
+                items=[],
+                error=error,
+                parent_id=parsed_message.get('message_id')
+            )
+            self.reply(reply)
+
+    def open(self, *args: str, **kwargs: str) -> None:
+        """Invoked when a new WebSocket is opened.
+
+        The arguments to `open` are extracted from the `tornado.web.URLSpec`
+        regular expression, just like the arguments to
+        `tornado.web.RequestHandler.get`.
+
+        `open` may be a coroutine. `on_message` will not be called until
+        `open` has returned.
+        """
+        if self._llm.last_error:
+            self._send_error({"new": self._llm.last_error})
+        # Start observing the provider error
+        self._llm.observe(self._send_error, "last_error")
+        # Send the server capabilities to the client.
+        self.reply(self._llm.capabilities)
+
+
+    async def handle_exception(self, e: Exception, request: CompletionRequest) -> None:
+        """
+        Handles an exception raised in either ``handle_request`` or
+        ``handle_stream_request``.
+
+        Args:
+            e: The exception raised.
+            request: The completion request that caused the exception.
+        """
+        hint = ""
+        if isinstance(e, PermissionError):
+            hint = "You've reached the free tier limit for Mito AI. Upgrade to Pro for unlimited uses or supply your own OpenAI API key."
+        elif "openai" in self._llm.capabilities.provider.lower():
+            hint = "There was an error communicating with OpenAI. This might be due to a temporary OpenAI outage, a problem with your internet connection, or an incorrect API key. Please try again."
+        else:
+            hint = "There was an error communicating with Mito server. This might be due to a temporary server outage or a problem with your internet connection. Please try again."
+
+        error: CompletionError = CompletionError.from_exception(e, hint=hint)
+        self._send_error({"new": error})
+
+        reply: Union[CompletionStreamChunk, CompletionReply]
+        if request.stream:
+            reply = CompletionStreamChunk(
+                chunk=CompletionItem(content="", isIncomplete=True),
+                parent_id=request.message_id,
+                done=True,
+                error=error,
+            )
+        else:
+            reply = CompletionReply(
+                items=[],
+                error=error,
+                parent_id=request.message_id,
+            )
+        self.reply(reply)
+
+    def reply(self, reply: Any) -> None:
+        """Write a reply object to the WebSocket connection.
+
+        Args:
+            reply: The completion reply object.
+                It must be a dataclass instance.
+        """
+        message = asdict(reply)
+        super().write_message(message)
+
+    def _send_error(self, change: Dict[str, Any]) -> None:
+        """Send an error message to the client."""
+        error = change["new"]
+
+        self.reply(
+            ErrorMessage(**asdict(error))
+            if error is not None
+            else ErrorMessage(error_type="", title="No error", traceback="")
+        )
```