mito-ai 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
|
|
6
|
+
def convert_utc_to_local_time(time_str: str) -> str:
    """Convert an ISO-8601 UTC timestamp string to the system's local time.

    Args:
        time_str: A UTC timestamp such as "2023-06-15T12:30:00Z". A single
            trailing 'Z' suffix is tolerated (datetime.fromisoformat rejects
            'Z' before Python 3.11).

    Returns:
        The local time formatted as 'MM-DD-YYYY HH:MM', or the original
        value unchanged if it cannot be parsed.
    """
    try:
        # Use removesuffix rather than rstrip('Z'): rstrip would strip
        # *every* trailing 'Z' character, not just a single suffix.
        utc_time = datetime.fromisoformat(time_str.removesuffix('Z'))

        # Mark the (naive) timestamp as UTC.
        # NOTE(review): if the input ever carries an explicit offset
        # (e.g. '+05:00'), replace() clobbers it — preserved from the
        # original behavior; confirm inputs are always UTC.
        utc_time = utc_time.replace(tzinfo=timezone.utc)

        # Convert to the system's local timezone.
        local_time = utc_time.astimezone()

        # Format as 'MM-DD-YYYY HH:MM' (the previous comment claimed
        # 'MMM DD HH:MM', which did not match this format string).
        return local_time.strftime('%m-%d-%Y %H:%M')

    except (ValueError, AttributeError):
        # Return the original input if parsing fails (malformed string,
        # or a non-string without .removesuffix).
        return time_str
|
mito_ai/auth/README.md
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# Authorization
|
|
2
|
+
|
|
3
|
+
### Authorization Code Flow:
|
|
4
|
+
|
|
5
|
+
1. User clicks "Sign In" → then they are redirected to Cognito's hosted UI
|
|
6
|
+
2. User authenticates → then Cognito redirects back with an authorization code
|
|
7
|
+
3. Our backend then exchanges the code for JWT tokens
|
|
8
|
+
|
|
9
|
+
The authorization code in step 2 is a short-lived, one-time-use code.
|
|
10
|
+
To exchange the authorization code for tokens, we have to make a POST request to Cognito's token endpoint:
|
|
11
|
+
POST https://your-domain.auth.region.amazoncognito.com/oauth2/token
|
|
12
|
+
|
|
13
|
+
The JWT tokens received provide the user with a one-hour session for subsequent deployments, without requiring them to log in again.
|
|
14
|
+
|
|
15
|
+
### The response from this request contains 3 JWT tokens:
|
|
16
|
+
1. Access Token - Used to call APIs (expires in 1 hour by default)
|
|
17
|
+
2. ID Token - Contains user identity information (name, email, etc.)
|
|
18
|
+
3. Refresh Token - Used to get new access/ID tokens when they expire
|
mito_ai/auth/__init__.py
ADDED
mito_ai/auth/handlers.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
import requests
|
|
7
|
+
import tornado
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
from jupyter_server.base.handlers import APIHandler
|
|
10
|
+
from mito_ai.logger import get_logger
|
|
11
|
+
from mito_ai.constants import ACTIVE_COGNITO_CONFIG
|
|
12
|
+
from typing import Dict, Any
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class AuthHandler(APIHandler):
    """Tornado handler that exchanges a Cognito authorization code for JWT tokens.

    POST body: {"code": "<authorization code>"}
    Success response: access/id/refresh tokens plus "expires_in".
    Error responses: 400 (missing code, bad JSON, failed exchange), 500 (unexpected).
    """

    # Maximum seconds to wait for Cognito's token endpoint. Without a timeout,
    # requests.post can block this server worker indefinitely if the endpoint
    # is slow or unreachable.
    _TOKEN_REQUEST_TIMEOUT_SECONDS = 30

    @property
    def log(self) -> logging.Logger:
        """Use the Mito AI logger instead of the default Jupyter one."""
        return get_logger()

    @tornado.web.authenticated
    def post(self) -> None:
        """Exchange an authorization code for JWT tokens."""
        try:
            data = json.loads(self.request.body)
            code = data.get('code')

            if not code:
                self.set_status(400)
                self.finish(json.dumps({"error": "Authorization code is required"}))
                return

            # Exchange the authorization code for tokens via Cognito.
            token_response = self._exchange_code_for_tokens(code)

            # _exchange_code_for_tokens signals failure with an "error" key.
            if token_response.get('error'):
                self.set_status(400)
                self.finish(json.dumps({"error": token_response['error']}))
                return

            # Return the tokens to the client.
            self.finish(json.dumps({
                "access_token": token_response.get('access_token'),
                "id_token": token_response.get('id_token'),
                "refresh_token": token_response.get('refresh_token'),
                "expires_in": token_response.get('expires_in')
            }))

        except json.JSONDecodeError:
            self.set_status(400)
            self.finish(json.dumps({"error": "Invalid JSON in request body"}))
        except Exception as e:
            self.log.error(f"Error in auth handler: {e}")
            self.set_status(500)
            self.finish(json.dumps({"error": "Internal server error"}))

    def _exchange_code_for_tokens(self, code: str) -> Dict[str, Any]:
        """Exchange an authorization code for JWT tokens using AWS Cognito.

        Args:
            code: The short-lived, one-time-use authorization code.

        Returns:
            Cognito's token payload on success, or a dict with a single
            "error" key on any failure (callers branch on that key —
            this method never raises).
        """
        try:
            # Standard OAuth2 authorization-code grant parameters.
            token_data = {
                'grant_type': 'authorization_code',
                'client_id': ACTIVE_COGNITO_CONFIG['CLIENT_ID'],
                'code': code,
                'redirect_uri': ACTIVE_COGNITO_CONFIG['REDIRECT_URI']
            }

            # Add the client secret only when one is configured for the app client.
            if ACTIVE_COGNITO_CONFIG['CLIENT_SECRET']:
                token_data['client_secret'] = ACTIVE_COGNITO_CONFIG['CLIENT_SECRET']

            # Make the token request. The timeout prevents an unresponsive
            # endpoint from hanging the handler; on expiry requests raises a
            # RequestException, which is handled below.
            response = requests.post(
                ACTIVE_COGNITO_CONFIG['TOKEN_ENDPOINT'],
                data=token_data,
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                timeout=self._TOKEN_REQUEST_TIMEOUT_SECONDS
            )

            if response.status_code == 200:
                token_response: Dict[str, Any] = response.json()

                current_time = datetime.now(timezone.utc)
                self.log.info(f"Token exchange successful at {current_time.isoformat()}")
                return token_response
            else:
                self.log.error(f"Token exchange failed: {response.status_code} - {response.text}")
                return {"error": "Failed to exchange authorization code for tokens"}

        except requests.exceptions.RequestException as e:
            self.log.error(f"Request error during token exchange: {e}")
            return {"error": "Network error during token exchange"}
        except Exception as e:
            self.log.error(f"Unexpected error during token exchange: {e}")
            return {"error": "Unexpected error during token exchange"}
mito_ai/auth/urls.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from typing import List, Tuple
|
|
5
|
+
from jupyter_server.utils import url_path_join
|
|
6
|
+
from .handlers import AuthHandler
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def get_auth_urls(base_url: str) -> List[Tuple[str, type]]:
    """Build the URL patterns for the auth endpoints.

    Args:
        base_url: The Jupyter server's base URL.

    Returns:
        A list of (url_pattern, handler_class) tuples.
    """
    token_url = url_path_join(base_url, "mito-ai", "auth", "token")
    return [(token_url, AuthHandler)]
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
import tornado
|
|
5
|
+
from typing import List, Any
|
|
6
|
+
from jupyter_server.base.handlers import APIHandler
|
|
7
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
8
|
+
from mito_ai.completions.models import ChatThreadMetadata
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ChatHistoryHandler(APIHandler):
    """
    Endpoints for working with chat history threads.
    """

    def initialize(self, message_history: GlobalMessageHistory) -> None:
        """Store the shared GlobalMessageHistory instance on the handler."""
        super().initialize()
        self._message_history = message_history

    @tornado.web.authenticated
    def get(self, *args: Any, **kwargs: Any) -> None:
        """Return all chat threads, or one full thread when a thread_id is given."""
        try:
            thread_id = kwargs.get("thread_id")

            if not thread_id:
                # No thread ID in the URL: return metadata for every thread.
                threads: List[ChatThreadMetadata] = self._message_history.get_threads()
                self.finish({
                    "threads": [
                        {
                            "thread_id": t.thread_id,
                            "name": t.name,
                            "creation_ts": t.creation_ts,
                            "last_interaction_ts": t.last_interaction_ts,
                        }
                        for t in threads
                    ]
                })
                return

            if thread_id not in self._message_history._chat_threads:
                self.set_status(404)
                self.finish({"error": f"Thread with ID {thread_id} not found"})
                return

            # Specific thread requested and found: include its full histories.
            thread = self._message_history._chat_threads[thread_id]
            self.finish({
                "thread_id": thread.thread_id,
                "name": thread.name,
                "creation_ts": thread.creation_ts,
                "last_interaction_ts": thread.last_interaction_ts,
                "display_history": thread.display_history,
                "ai_optimized_history": thread.ai_optimized_history,
            })

        except Exception as e:
            self.set_status(500)
            self.finish({"error": str(e)})
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from typing import List, Tuple, Any
|
|
5
|
+
from jupyter_server.utils import url_path_join
|
|
6
|
+
from mito_ai.chat_history.handlers import ChatHistoryHandler
|
|
7
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def get_chat_history_urls(base_url: str, message_history: GlobalMessageHistory) -> List[Tuple[str, Any, dict]]:
    """Get all chat history related URL patterns.

    Args:
        base_url: The base URL for the Jupyter server
        message_history: The global message history instance

    Returns:
        List of (url_pattern, handler_class, handler_kwargs) tuples
    """
    BASE_URL = base_url + "/mito-ai/chat-history"

    # Route for listing every thread.
    list_route = (
        url_path_join(BASE_URL, "threads"),
        ChatHistoryHandler,
        {"message_history": message_history},
    )
    # Route for fetching one thread by its ID.
    detail_route = (
        url_path_join(BASE_URL, "threads", "(?P<thread_id>[^/]+)"),
        ChatHistoryHandler,
        {"message_history": message_history},
    )
    return [list_route, detail_route]
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
5
|
+
from mito_ai.completions.models import AgentResponse, AgentSmartDebugMetadata, MessageType, ResponseFormatInfo
|
|
6
|
+
from mito_ai.completions.prompt_builders.agent_smart_debug_prompt import create_agent_smart_debug_prompt
|
|
7
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
8
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
9
|
+
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
|
|
10
|
+
from mito_ai.completions.completion_handlers.utils import append_agent_system_message
|
|
11
|
+
|
|
12
|
+
__all__ = ["get_agent_auto_error_fixup_completion"]
|
|
13
|
+
|
|
14
|
+
class AgentAutoErrorFixupHandler(CompletionHandler[AgentSmartDebugMetadata]):
    """Handler for agent auto error fixup completions."""

    @staticmethod
    async def get_completion(
        metadata: AgentSmartDebugMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        """Request an auto-error-fixup completion and record it in the history."""

        # Make sure the agent system message is in the thread before anything else.
        await append_agent_system_message(
            message_history, model, provider, metadata.threadId, metadata.isChromeBrowser
        )

        # Build the AI-facing prompt; the user-facing history only shows the error.
        ai_prompt = create_agent_smart_debug_prompt(metadata)
        user_message: ChatCompletionMessageParam = {"role": "user", "content": ai_prompt}
        user_display_message: ChatCompletionMessageParam = {
            "role": "user",
            "content": metadata.errorMessage,
        }

        await message_history.append_message(
            user_message, user_display_message, model, provider, metadata.threadId
        )

        # Ask the provider for a structured AgentResponse completion.
        completion = await provider.request_completions(
            messages=message_history.get_ai_optimized_history(metadata.threadId),
            model=model,
            response_format_info=ResponseFormatInfo(
                name='agent_response',
                format=AgentResponse
            ),
            message_type=MessageType.AGENT_AUTO_ERROR_FIXUP,
            user_input=metadata.errorMessage,
            thread_id=metadata.threadId
        )

        # Record the assistant reply (identical in both history views).
        assistant_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
        await message_history.append_message(
            assistant_message, assistant_message, model, provider, metadata.threadId
        )

        return completion

# Use the static method directly
get_agent_auto_error_fixup_completion = AgentAutoErrorFixupHandler.get_completion
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from typing import List, Literal, Union
|
|
5
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
6
|
+
from mito_ai.completions.models import AgentExecutionMetadata, MessageType, ResponseFormatInfo, AgentResponse
|
|
7
|
+
from mito_ai.completions.prompt_builders.agent_execution_prompt import create_agent_execution_prompt
|
|
8
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
9
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
10
|
+
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
|
|
11
|
+
from mito_ai.completions.completion_handlers.utils import append_agent_system_message, create_ai_optimized_message
|
|
12
|
+
|
|
13
|
+
__all__ = ["get_agent_execution_completion"]
|
|
14
|
+
|
|
15
|
+
class AgentExecutionHandler(CompletionHandler[AgentExecutionMetadata]):
    """Handler for agent execution completions."""

    @staticmethod
    async def get_completion(
        metadata: AgentExecutionMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        """Request an agent execution completion and record it in the history."""

        # When re-running from an earlier point in the conversation, drop the
        # history entries past that index first.
        if metadata.index is not None:
            message_history.truncate_histories(
                thread_id=metadata.threadId,
                index=metadata.index
            )

        # Make sure the agent system message is in the thread.
        await append_agent_system_message(
            message_history, model, provider, metadata.threadId, metadata.isChromeBrowser
        )

        # The AI-facing message may embed cell output / extra context; the
        # display history only shows the user's raw input.
        ai_message = create_ai_optimized_message(
            create_agent_execution_prompt(metadata),
            metadata.base64EncodedActiveCellOutput,
            metadata.additionalContext,
        )
        display_message: ChatCompletionMessageParam = {"role": "user", "content": metadata.input}

        await message_history.append_message(
            ai_message, display_message, model, provider, metadata.threadId
        )

        # Ask the provider for a structured AgentResponse completion.
        completion = await provider.request_completions(
            messages=message_history.get_ai_optimized_history(metadata.threadId),
            model=model,
            response_format_info=ResponseFormatInfo(
                name='agent_response',
                format=AgentResponse
            ),
            message_type=MessageType.AGENT_EXECUTION,
            user_input=metadata.input,
            thread_id=metadata.threadId
        )

        # Record the assistant reply (identical in both history views).
        assistant_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
        await message_history.append_message(
            assistant_message, assistant_message, model, provider, metadata.threadId
        )

        return completion

# Use the static method directly
get_agent_execution_completion = AgentExecutionHandler.get_completion
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from typing import List, Union, AsyncGenerator, Callable
|
|
5
|
+
|
|
6
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
7
|
+
from mito_ai.completions.models import ChatMessageMetadata, MessageType, CompletionRequest, CompletionStreamChunk, CompletionReply
|
|
8
|
+
from mito_ai.completions.prompt_builders.chat_prompt import create_chat_prompt
|
|
9
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
10
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
11
|
+
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
|
|
12
|
+
from mito_ai.completions.completion_handlers.utils import append_chat_system_message, create_ai_optimized_message
|
|
13
|
+
|
|
14
|
+
__all__ = ["get_chat_completion", "stream_chat_completion"]
|
|
15
|
+
|
|
16
|
+
class ChatCompletionHandler(CompletionHandler[ChatMessageMetadata]):
    """Handler for chat completions.

    Provides both a non-streaming (`get_completion`) and a streaming
    (`stream_completion`) entry point. Both share the same preparation
    steps (history truncation, system message, prompt construction and
    recording), which live in the private `_prepare_chat_messages` helper
    so the two code paths cannot drift apart.
    """

    @staticmethod
    async def _prepare_chat_messages(
        metadata: ChatMessageMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> None:
        """Prepare the thread's message history for a new chat turn.

        Truncates the history when the user resubmits an edited message,
        ensures the system message exists, builds the AI prompt plus a
        shorter display version, and appends both to the history.
        """
        # Resubmitting an edited message: drop everything after its index.
        if metadata.index is not None:
            message_history.truncate_histories(
                index=metadata.index,
                thread_id=metadata.threadId
            )

        # Add the system message if it doesn't already exist
        await append_chat_system_message(message_history, model, provider, metadata.threadId)

        # Create the prompt. The boolean flag tells the prompt builder whether
        # the active cell produced output that will be attached to the message.
        prompt = create_chat_prompt(
            metadata.variables or [],
            metadata.files or [],
            metadata.activeCellCode,
            metadata.activeCellId,
            metadata.base64EncodedActiveCellOutput is not None and metadata.base64EncodedActiveCellOutput != '',
            metadata.input,
            metadata.additionalContext
        )
        display_prompt = f"```python{metadata.activeCellCode or ''}```{metadata.input}"

        # Add the prompt to the message history: a full AI-optimized version
        # and a compact display version for rendering in the UI.
        new_ai_optimized_message = create_ai_optimized_message(prompt, metadata.base64EncodedActiveCellOutput, metadata.additionalContext)
        new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)

    @staticmethod
    async def get_completion(
        metadata: ChatMessageMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        """Get a chat completion from the AI provider (non-streaming).

        Args:
            metadata: The metadata for the chat completion request.
            provider: The AI provider to use.
            message_history: The message history for this conversation.
            model: The model identifier to request completions with.

        Returns:
            The completion string returned by the provider.
        """
        await ChatCompletionHandler._prepare_chat_messages(metadata, provider, message_history, model)

        # Get the completion (non-streaming)
        completion = await provider.request_completions(
            messages=message_history.get_ai_optimized_history(metadata.threadId),
            model=model,
            message_type=MessageType.CHAT,
            user_input=metadata.input,
            thread_id=metadata.threadId
        )

        # Record the assistant's reply (same text for AI and display versions).
        ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
        await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)

        return completion

    @staticmethod
    async def stream_completion(
        metadata: ChatMessageMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        message_id: str,
        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
        model: str
    ) -> str:
        """Stream chat completions from the AI provider.

        Args:
            metadata: The metadata for the chat completion request.
            provider: The AI provider to use.
            message_history: The message history for this conversation.
            message_id: The ID of the message being processed.
            reply_fn: Function to call with each chunk for streaming replies.
            model: The model identifier to request completions with.

        Returns:
            The accumulated response string.
        """
        await ChatCompletionHandler._prepare_chat_messages(metadata, provider, message_history, model)

        # Stream the completions using the provider's stream method
        accumulated_response = await provider.stream_completions(
            message_type=MessageType.CHAT,
            messages=message_history.get_ai_optimized_history(metadata.threadId),
            model=model,
            message_id=message_id,
            reply_fn=reply_fn,
            user_input=metadata.input,
            thread_id=metadata.threadId
        )

        # Save the accumulated response to message history
        ai_response_message: ChatCompletionMessageParam = {
            "role": "assistant",
            "content": accumulated_response,
        }
        await message_history.append_message(
            ai_response_message, ai_response_message, model, provider, metadata.threadId
        )

        return accumulated_response
|
|
138
|
+
|
|
139
|
+
# Module-level aliases so callers can invoke the handler's static methods
# directly without referencing the class.
get_chat_completion = ChatCompletionHandler.get_completion
stream_chat_completion = ChatCompletionHandler.stream_completion
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from typing import List, Union, AsyncGenerator, Callable
|
|
5
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
6
|
+
from mito_ai.completions.models import CodeExplainMetadata, MessageType, CompletionRequest, CompletionStreamChunk, CompletionReply
|
|
7
|
+
from mito_ai.completions.prompt_builders.explain_code_prompt import create_explain_code_prompt
|
|
8
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
9
|
+
from mito_ai.completions.message_history import GlobalMessageHistory
|
|
10
|
+
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
|
|
11
|
+
from mito_ai.completions.completion_handlers.utils import append_chat_system_message
|
|
12
|
+
|
|
13
|
+
__all__ = ["get_code_explain_completion", "stream_code_explain_completion"]
|
|
14
|
+
|
|
15
|
+
class CodeExplainHandler(CompletionHandler[CodeExplainMetadata]):
    """Handler for code explain completions.

    Provides both a non-streaming (`get_completion`) and a streaming
    (`stream_completion`) entry point. Both share identical preparation
    steps (system message, explain prompt, history recording), factored
    into the private `_record_explain_prompt` helper to avoid duplication.
    """

    @staticmethod
    async def _record_explain_prompt(
        metadata: CodeExplainMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        """Ensure the system message exists and record the explain prompt.

        Builds the explain-code prompt for the active cell plus a compact
        display version, appends both to the history, and returns the
        thread id the messages were recorded under.
        """
        active_cell_code = metadata.activeCellCode or ''
        thread_id = metadata.threadId

        # Add the system message if it doesn't already exist
        await append_chat_system_message(message_history, model, provider, thread_id)

        # Create the prompt and a compact display version for the UI.
        prompt = create_explain_code_prompt(active_cell_code)
        display_prompt = f"```python{active_cell_code}```"

        # Add the prompt to the message history
        new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
        new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
        await message_history.append_message(
            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
        )

        return thread_id

    @staticmethod
    async def get_completion(
        metadata: CodeExplainMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        """Get a code explain completion from the AI provider (non-streaming).

        Args:
            metadata: The metadata for the code explain completion request.
            provider: The AI provider to use.
            message_history: The message history for this conversation.
            model: The model identifier to request completions with.

        Returns:
            The completion string returned by the provider.
        """
        thread_id = await CodeExplainHandler._record_explain_prompt(
            metadata, provider, message_history, model
        )

        # Get the completion
        completion = await provider.request_completions(
            messages=message_history.get_ai_optimized_history(thread_id),
            model=model,
            message_type=MessageType.CODE_EXPLAIN,
            thread_id=thread_id
        )

        # Add the response to message history (same text for AI and display).
        ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
        await message_history.append_message(ai_response_message, ai_response_message, model, provider, thread_id)

        return completion

    @staticmethod
    async def stream_completion(
        metadata: CodeExplainMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        message_id: str,
        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
        model: str
    ) -> str:
        """Stream code explain completions from the AI provider.

        Args:
            metadata: The metadata for the code explain completion request.
            provider: The AI provider to use.
            message_history: The message history for this conversation.
            message_id: The ID of the message being processed.
            reply_fn: Function to call with each chunk for streaming replies.
            model: The model identifier to request completions with.

        Returns:
            The accumulated response string.
        """
        thread_id = await CodeExplainHandler._record_explain_prompt(
            metadata, provider, message_history, model
        )

        # Stream the completions using the provider's stream method
        accumulated_response = await provider.stream_completions(
            message_type=MessageType.CODE_EXPLAIN,
            messages=message_history.get_ai_optimized_history(thread_id),
            model=model,
            message_id=message_id,
            reply_fn=reply_fn,
            thread_id=thread_id
        )

        # Add the response to message history (same text for AI and display).
        ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": accumulated_response}
        await message_history.append_message(ai_response_message, ai_response_message, model, provider, thread_id)

        return accumulated_response
|
|
110
|
+
|
|
111
|
+
# Module-level aliases so callers can invoke the handler's static methods
# directly without referencing the class.
get_code_explain_completion = CodeExplainHandler.get_completion
stream_code_explain_completion = CodeExplainHandler.stream_completion
|