mito_ai-0.1.50-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
mito_ai/utils/mito_server_utils.py
ADDED

@@ -0,0 +1,242 @@
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.

import asyncio
import json
import time
from typing import Any, Dict, Optional, Callable, Union, AsyncGenerator
from mito_ai.completions.models import MessageType, CompletionReply, CompletionStreamChunk, CompletionItem
from mito_ai.utils.server_limits import check_mito_server_quota, update_mito_server_quota
from tornado.httpclient import HTTPResponse
from mito_ai.constants import MITO_GEMINI_URL
from mito_ai.utils.utils import _create_http_client

MITO_ERROR_MARKER = "MITO_ERROR_MARKER:"

class ProviderCompletionException(Exception):
    """Custom exception for Mito server errors that converts well to CompletionError."""

    def __init__(self, error_message: str, provider_name: str = "LLM Provider", error_type: str = "LLMProviderError"):
        self.error_message = error_message
        self.provider_name = provider_name
        self.error_type = error_type

        # Create user-friendly title and hint
        self.user_friendly_title = f"{provider_name} Error: {error_message}"
        self.user_friendly_hint = f"There was a problem with {provider_name}. Try switching to a different model and trying again."

        # Set args[0] for fallback compatibility
        super().__init__(self.user_friendly_title)

    def __str__(self) -> str:
        return f"{self.provider_name} Error: {self.error_message}"


async def get_response_from_mito_server(
    url: str,
    headers: dict,
    data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
    provider_name: str = "Mito Server"
) -> str:
    """
    Get a response from the Mito server.

    Raises:
        ProviderCompletionException: When the server returns an error or invalid response
        Exception: For network/HTTP errors (let these bubble up to be handled by retry logic)
    """
    # First check the mito server quota. If the user has reached the limit, we raise an exception.
    check_mito_server_quota(message_type)

    http_client, http_client_timeout = _create_http_client(timeout, max_retries)
    start_time = time.time()

    try:
        res = await http_client.fetch(
            url,
            method="POST",
            headers=headers,
            body=json.dumps(data),
            request_timeout=http_client_timeout
        )
        print(f"Mito server request completed in {time.time() - start_time:.2f} seconds")

        # Parse and validate response
        try:
            content = json.loads(res.body.decode("utf-8"))

            if "completion" in content:
                return content["completion"] # type: ignore
            elif "error" in content:
                # Server returned an error
                raise ProviderCompletionException(content['error'], provider_name=provider_name)
            else:
                # Invalid response format
                raise ProviderCompletionException(f"No completion found in response: {content}", provider_name=provider_name)
        except ProviderCompletionException:
            # Re-raise ProviderCompletionException as-is
            raise
        except Exception as e:
            raise ProviderCompletionException(f"Error parsing response: {str(e)}", provider_name=provider_name)

    finally:
        try:
            # We always update the quota, even if there is an error
            update_mito_server_quota(message_type)
        except Exception as e:
            pass

        http_client.close()


async def stream_response_from_mito_server(
    url: str,
    headers: Dict[str, str],
    data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
    reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
    message_id: str,
    chunk_processor: Optional[Callable[[str], str]] = None,
    provider_name: str = "Mito Server",
) -> AsyncGenerator[str, None]:
    """
    Stream responses from the Mito server.

    This is a unified streaming function that can be used by all providers (OpenAI, Anthropic, Gemini).

    Args:
        url: The Mito server URL to stream from
        headers: Request headers
        data: Request data
        timeout: Request timeout in seconds
        max_retries: Maximum number of retries
        message_type: The message type for quota tracking
        provider_name: Name of the provider for error messages
        reply_fn: Optional function to call with each chunk for streaming replies
        message_id: The message ID to track the request
        chunk_processor: Optional function to process chunks before yielding (e.g., for Gemini's special processing)

    Yields:
        Chunks of text from the streaming response
    """
    # Check the mito server quota
    check_mito_server_quota(message_type)

    # Create HTTP client with appropriate timeout settings
    http_client, http_client_timeout = _create_http_client(timeout, max_retries)

    # Set up streaming infrastructure
    start_time = time.time()
    chunk_queue: asyncio.Queue[str] = asyncio.Queue()
    fetch_complete = False

    # Define a callback to process chunks and add them to the queue
    def chunk_callback(chunk: bytes) -> None:
        try:
            chunk_str = chunk.decode('utf-8')
            asyncio.create_task(chunk_queue.put(chunk_str))
        except Exception as e:
            print(f"Error processing {provider_name} streaming chunk: {str(e)}")

    # Execute the streaming request
    fetch_future = None
    try:
        fetch_future = http_client.fetch(
            url,
            method="POST",
            headers=headers,
            body=json.dumps(data),
            request_timeout=http_client_timeout,
            streaming_callback=chunk_callback
        )

        # Create a task to wait for the fetch to complete
        async def wait_for_fetch() -> None:
            try:
                await fetch_future
                nonlocal fetch_complete
                fetch_complete = True
                print(f"{provider_name} fetch completed")
            except Exception as e:
                print(f"Error in {provider_name} fetch: {str(e)}")
                raise

        # Start the task to wait for fetch completion
        fetch_task = asyncio.create_task(wait_for_fetch())

        # Yield chunks as they arrive
        while not (fetch_complete and chunk_queue.empty()):
            try:
                # Wait for a chunk with a timeout to prevent deadlocks
                chunk = await asyncio.wait_for(chunk_queue.get(), timeout=0.1)

                # Process chunk if processor is provided
                processed_chunk = chunk
                if chunk_processor:
                    processed_chunk = chunk_processor(chunk)

                # Check if this chunk contains an error marker
                if processed_chunk.startswith(MITO_ERROR_MARKER):
                    error_message = processed_chunk[len(MITO_ERROR_MARKER):]
                    print(f"Detected error in {provider_name} stream: {error_message}")
                    raise ProviderCompletionException(error_message, provider_name=provider_name)

                if reply_fn is not None and message_id is not None:
                    # Send the chunk directly to the frontend
                    reply_fn(CompletionStreamChunk(
                        parent_id=message_id,
                        chunk=CompletionItem(
                            content=processed_chunk,
                            isIncomplete=True,
                            token=message_id,
                        ),
                        done=False,
                    ))

                yield chunk
            except asyncio.TimeoutError:
                # No chunk available within timeout, check if fetch is complete
                if fetch_complete and chunk_queue.empty():
                    break

                # Otherwise continue waiting for chunks
                continue

        print(f"\n{provider_name} stream completed in {time.time() - start_time:.2f} seconds")

        if reply_fn is not None and message_id is not None:
            # Send a final chunk to indicate completion
            reply_fn(CompletionStreamChunk(
                parent_id=message_id,
                chunk=CompletionItem(
                    content="",
                    isIncomplete=False,
                    token=message_id,
                ),
                done=True,
            ))
    except Exception as e:
        print(f"\n{provider_name} stream failed after {time.time() - start_time:.2f} seconds with error: {str(e)}")
        # If an exception occurred, ensure the fetch future is awaited to properly clean up
        if fetch_future:
            try:
                await fetch_future
            except Exception:
                pass
        raise
    finally:
        # Clean up resources
        try:
            # We always update the quota, even if there is an error
            update_mito_server_quota(message_type)
        except Exception as e:
            pass

        http_client.close()
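A minimal usage sketch for stream_response_from_mito_server, assuming only the signature shown above; the URL, headers, payload, and message id below are placeholders rather than real Mito server values, so the commented-out run would only succeed against a reachable Mito-server-compatible endpoint.

import asyncio

from mito_ai.completions.models import MessageType
from mito_ai.utils.mito_server_utils import stream_response_from_mito_server

async def demo_stream() -> None:
    collected = []

    def reply_fn(reply) -> None:
        # In the extension this forwards each CompletionStreamChunk to the
        # frontend websocket; the sketch just drops it.
        pass

    async for chunk in stream_response_from_mito_server(
        url="https://example.invalid/completions",      # placeholder endpoint
        headers={"Content-Type": "application/json"},
        data={"data": {}},                               # placeholder payload
        timeout=30,
        max_retries=1,
        message_type=MessageType.CHAT,
        reply_fn=reply_fn,
        message_id="demo-message-id",
        chunk_processor=None,
        provider_name="OpenAI",
    ):
        collected.append(chunk)

    print("".join(collected))

# asyncio.run(demo_stream())  # needs a reachable Mito-server-compatible endpoint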
mito_ai/utils/open_ai_utils.py
ADDED

@@ -0,0 +1,200 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.


# Copyright (c) Saga Inc.

import asyncio
import json
import time
from typing import Any, Dict, List, Optional, Final, Union, AsyncGenerator, Tuple, Callable
from mito_ai.utils.mito_server_utils import get_response_from_mito_server, stream_response_from_mito_server
from mito_ai.utils.provider_utils import does_message_require_fast_model
from tornado.httpclient import AsyncHTTPClient
from openai.types.chat import ChatCompletionMessageParam

from mito_ai.utils.utils import is_running_test
from mito_ai.completions.models import MessageType, ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem
from mito_ai.utils.schema import UJ_STATIC_USER_ID, UJ_USER_EMAIL
from mito_ai.utils.db import get_user_field
from mito_ai.utils.version_utils import is_pro
from mito_ai.utils.server_limits import check_mito_server_quota
from mito_ai.utils.telemetry_utils import log_ai_completion_success
from .utils import _create_http_client
from mito_ai.constants import MITO_OPENAI_URL


__user_email: Optional[str] = None
__user_id: Optional[str] = None

FAST_OPENAI_MODEL = "gpt-4.1-nano"

def _prepare_request_data_and_headers(
    last_message_content: Union[str, None],
    ai_completion_data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
) -> Tuple[Dict[str, Any], Dict[str, str]]:
    """
    Prepare request data and headers for Mito server API calls.

    Args:
        last_message_content: The last message content
        ai_completion_data: The AI completion data
        timeout: The timeout in seconds
        max_retries: The maximum number of retries
        message_type: The message type

    Returns:
        A tuple containing the request data and headers
    """
    global __user_email, __user_id

    if __user_email is None:
        __user_email = get_user_field(UJ_USER_EMAIL)
    if __user_id is None:
        __user_id = get_user_field(UJ_STATIC_USER_ID)

    data = {
        "timeout": timeout,
        "max_retries": max_retries,
        "email": __user_email,
        "user_id": __user_id,
        "data": ai_completion_data,
        "user_input": last_message_content or "", # We add this just for logging purposes
    }

    headers = {
        "Content-Type": "application/json",
    }

    return data, headers

async def get_ai_completion_from_mito_server(
    last_message_content: Union[str, None],
    ai_completion_data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
) -> str:

    # Prepare request data and headers
    data, headers = _prepare_request_data_and_headers(
        last_message_content,
        ai_completion_data,
        timeout,
        max_retries,
        message_type
    )

    return await get_response_from_mito_server(
        MITO_OPENAI_URL,
        headers,
        data,
        timeout,
        max_retries,
        message_type,
        provider_name="OpenAI"
    )

async def stream_ai_completion_from_mito_server(
    last_message_content: Union[str, None],
    ai_completion_data: Dict[str, Any],
    timeout: int,
    max_retries: int,
    message_type: MessageType,
    reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
    message_id: str,
) -> AsyncGenerator[str, None]:
    """
    Stream AI completions from the Mito server.

    This function is similar to get_ai_completion_from_mito_server but handles streaming responses.
    It yields the streamed content as it arrives.

    Args:
        last_message_content: The last message content
        ai_completion_data: The AI completion data
        timeout: The timeout in seconds
        max_retries: The maximum number of retries
        message_type: The message type
        reply_fn: Optional function to call with each chunk for streaming replies
        message_id: The message ID to track the request

    Yields:
        Chunks of text from the streaming response
    """
    # Prepare request data and headers
    data, headers = _prepare_request_data_and_headers(
        last_message_content,
        ai_completion_data,
        timeout,
        max_retries,
        message_type
    )

    # Use the unified streaming function
    async for chunk in stream_response_from_mito_server(
        url=MITO_OPENAI_URL,
        headers=headers,
        data=data,
        timeout=timeout,
        max_retries=max_retries,
        message_type=message_type,
        reply_fn=reply_fn,
        message_id=message_id,
        chunk_processor=None,
        provider_name="OpenAI",
    ):
        yield chunk


def get_open_ai_completion_function_params(
    message_type: MessageType,
    model: str,
    messages: List[ChatCompletionMessageParam],
    stream: bool,
    response_format_info: Optional[ResponseFormatInfo] = None,
) -> Dict[str, Any]:

    print("MESSAGE TYPE: ", message_type)
    message_requires_fast_model = does_message_require_fast_model(message_type)
    model = FAST_OPENAI_MODEL if message_requires_fast_model else model

    print(f"model: {model}")

    completion_function_params = {
        "model": model,
        "stream": stream,
        "messages": messages,
    }

    # If a response format is provided, we need to convert it to a json schema.
    # Pydantic models are supported by the OpenAI API, however, we need to be able to
    # serialize it for requests that are going to be sent to the mito server.
    # OpenAI expects a very specific schema as seen below.
    if response_format_info:
        json_schema = response_format_info.format.schema()

        # Add additionalProperties: False to the top-level schema
        json_schema["additionalProperties"] = False

        # Nested object definitions in $defs need to have additionalProperties set to False also
        if "$defs" in json_schema:
            for def_name, def_schema in json_schema["$defs"].items():
                if def_schema.get("type") == "object":
                    def_schema["additionalProperties"] = False

        completion_function_params["response_format"] = {
            "type": "json_schema",
            "json_schema": {
                "name": f"{response_format_info.name}",
                "schema": json_schema,
                "strict": True
            }
        }

    return completion_function_params
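A minimal sketch of the fast-model routing in get_open_ai_completion_function_params, assuming the module path mito_ai.utils.open_ai_utils from the file listing; the model name "gpt-4o" and the message dictionaries are placeholder inputs.

from mito_ai.completions.models import MessageType
from mito_ai.utils.open_ai_utils import get_open_ai_completion_function_params

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain this traceback."},
]

# Chat messages keep the caller's model choice...
chat_params = get_open_ai_completion_function_params(
    message_type=MessageType.CHAT,
    model="gpt-4o",              # placeholder model name
    messages=messages,
    stream=False,
)
assert chat_params["model"] == "gpt-4o"

# ...while inline completions are rerouted to FAST_OPENAI_MODEL ("gpt-4.1-nano").
inline_params = get_open_ai_completion_function_params(
    message_type=MessageType.INLINE_COMPLETION,
    model="gpt-4o",
    messages=messages,
    stream=True,
)
assert inline_params["model"] == "gpt-4.1-nano"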
mito_ai/utils/provider_utils.py
ADDED

@@ -0,0 +1,49 @@
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.

from typing import Union

from mito_ai.completions.models import MessageType


def get_model_provider(model: str) -> Union[str, None]:
    """
    Determine the model type based on the model name prefix
    """
    if not model:
        return None

    model_lower = model.lower()

    if model_lower.startswith('claude'):
        return 'claude'
    elif model_lower.startswith('gemini'):
        return 'gemini'
    elif model_lower.startswith('ollama'):
        return 'ollama'
    elif model_lower.startswith('gpt'):
        return 'openai'

    return None


def does_message_require_fast_model(message_type: MessageType) -> bool:
    """
    Determines if a message requires the fast model.

    The fast model is used for messages that are not chat messages.
    For example, inline completions and chat name generation need to be fast
    so they don't slow down the user's experience.
    """

    if message_type in (MessageType.CHAT, MessageType.SMART_DEBUG, MessageType.CODE_EXPLAIN, MessageType.AGENT_EXECUTION, MessageType.AGENT_AUTO_ERROR_FIXUP):
        return False
    elif message_type in (MessageType.INLINE_COMPLETION, MessageType.CHAT_NAME_GENERATION):
        return True
    elif message_type in (MessageType.START_NEW_CHAT, MessageType.FETCH_HISTORY, MessageType.GET_THREADS, MessageType.DELETE_THREAD, MessageType.UPDATE_MODEL_CONFIG):
        # These messages don't use any model, but we add them here for type safety
        return True
    else:
        raise ValueError(f"Invalid message type: {message_type}")
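A minimal sketch of the routing these two helpers encode, using only prefixes and message types that appear above; the specific model-name strings are illustrative.

from mito_ai.completions.models import MessageType
from mito_ai.utils.provider_utils import get_model_provider, does_message_require_fast_model

# Provider is inferred purely from the model-name prefix.
assert get_model_provider("claude-3-5-sonnet") == "claude"
assert get_model_provider("gemini-1.5-pro") == "gemini"
assert get_model_provider("gpt-4o") == "openai"
assert get_model_provider("some-unknown-model") is None

# Latency-sensitive message types are routed to the fast model.
assert does_message_require_fast_model(MessageType.INLINE_COMPLETION) is True
assert does_message_require_fast_model(MessageType.CHAT) is False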
mito_ai/utils/schema.py
ADDED
@@ -0,0 +1,86 @@
# Copyright (c) Saga Inc.
# Distributed under the terms of the GNU Affero General Public License v3.0 License.

import os
from datetime import datetime
from .utils import get_random_id

# Current field names
# Some helpful constants
GITHUB_ACTION_ID = 'github_action'
GITHUB_ACTION_EMAIL = 'github@action.com'

# Old field names
UJ_INTENDED_BEHAVIOR = 'intended_behavior'
UJ_CLOSED_FEEDBACK = 'closed_feedback'
UJ_MITOSHEET_LAST_FIVE_USAGES = 'mitosheet_last_five_usages'

# Current field names
UJ_USER_JSON_VERSION = 'user_json_version'
UJ_STATIC_USER_ID = 'static_user_id'
UJ_USER_SALT = 'user_salt'
UJ_USER_EMAIL = 'user_email'
UJ_RECEIVED_TOURS = 'received_tours'
UJ_FEEDBACKS = 'feedbacks'
UJ_FEEDBACKS_V2 = 'feedbacks_v2'
UJ_MITOSHEET_CURRENT_VERSION = 'mitosheet_current_version'
UJ_MITOSHEET_LAST_UPGRADED_DATE = 'mitosheet_last_upgraded_date'
UJ_MITOSHEET_LAST_FIFTY_USAGES = 'mitosheet_last_fifty_usages'
UJ_MITOSHEET_TELEMETRY = 'mitosheet_telemetry'
UJ_MITOSHEET_PRO = 'mitosheet_pro'
UJ_MITOSHEET_ENTERPRISE = 'mitosheet_enterprise'
UJ_EXPERIMENT = 'experiment'
UJ_RECEIVED_CHECKLISTS = 'received_checklists'
UJ_AI_PRIVACY_POLICY = 'ai_privacy_policy'
UJ_AI_MITO_API_NUM_USAGES = 'ai_mito_api_num_usages'
UJ_AI_MITO_AUTOCOMPLETE_NUM_USAGES = 'ai_mito_autocomplete_num_usages'
UJ_MITO_AI_FIRST_USAGE_DATE = 'mito_ai_first_usage_date'
UJ_MITO_AI_LAST_RESET_DATE = 'mito_ai_last_reset_date'

MITO_CONFIG_KEY_HOME_FOLDER = 'MITO_CONFIG_HOME_FOLDER'
if MITO_CONFIG_KEY_HOME_FOLDER in os.environ:
    HOME_FOLDER = os.path.expanduser(os.environ[MITO_CONFIG_KEY_HOME_FOLDER])
else:
    HOME_FOLDER = os.path.expanduser('~')

# Where all global .mito files are stored
MITO_FOLDER = os.path.join(HOME_FOLDER, ".mito")

"""
The most up to date version of the user.json object
"""
USER_JSON_VERSION_10 = {
    # The new version of the user json object
    UJ_USER_JSON_VERSION: 10,
    # The static id of the user
    UJ_STATIC_USER_ID: get_random_id(),
    # A random secret that the user can use as salt when hashing things
    UJ_USER_SALT: get_random_id(),
    # Email of the user
    UJ_USER_EMAIL: '',
    # Tours that the user has received
    UJ_RECEIVED_TOURS: [],
    # A list of all the feedback the user has given
    UJ_FEEDBACKS: [],
    UJ_FEEDBACKS_V2: {},
    UJ_MITOSHEET_CURRENT_VERSION: 0,
    UJ_MITOSHEET_LAST_UPGRADED_DATE: datetime.today().strftime('%Y-%m-%d'),
    UJ_MITOSHEET_LAST_FIFTY_USAGES: [datetime.today().strftime('%Y-%m-%d')],
    UJ_MITOSHEET_TELEMETRY: True,
    UJ_MITOSHEET_PRO: False,
    UJ_MITOSHEET_ENTERPRISE: False,
    UJ_EXPERIMENT: {
        'experiment_id': 'installer_communication_and_time_to_value',
        'variant': 'B',
    },
    UJ_RECEIVED_CHECKLISTS: {},
    UJ_AI_PRIVACY_POLICY: False,
    UJ_AI_MITO_API_NUM_USAGES: 0,
    UJ_AI_MITO_AUTOCOMPLETE_NUM_USAGES: 0,
    UJ_MITO_AI_FIRST_USAGE_DATE: datetime.today().strftime('%Y-%m-%d'),
    UJ_MITO_AI_LAST_RESET_DATE: datetime.today().strftime('%Y-%m-%d'),
}

# This is the most up to date user json, and you must update it when
# you add a new schema
USER_JSON_DEFAULT = USER_JSON_VERSION_10