mito-ai 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
mito_ai/completions/completion_handlers/completion_handler.py
@@ -0,0 +1,42 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import Protocol, TypeVar
+from abc import abstractmethod, ABCMeta
+from mito_ai.completions.models import ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
+from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.completions.message_history import GlobalMessageHistory
+
+T = TypeVar('T', ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, AgentSmartDebugMetadata, InlineCompleterMetadata, contravariant=True)
+
+class CompletionHandler(Protocol[T], metaclass=ABCMeta):
+    """Protocol defining the interface for completion handlers.
+
+    All completion handler classes should implement this protocol to ensure
+    they provide a get_completion static method with the correct signature.
+    """
+
+    @staticmethod
+    @abstractmethod
+    async def get_completion(
+        metadata: T,
+        provider: OpenAIProvider,
+        message_history: GlobalMessageHistory,
+        model: str
+    ) -> str:
+        """Get a completion from the AI provider.
+
+        Args:
+            metadata: Metadata about the completion request
+            provider: The AI provider to use
+            message_history: The history of messages in the conversation
+            model: The model to use for the completion
+
+        Returns:
+            The completion string from the AI
+        """
+        pass
+
+
+
+
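The protocol above only constrains the static `get_completion` signature; the hunks that follow are the package's real implementations. As a minimal illustrative sketch (the `CannedReplyHandler` class and its fixed return value are hypothetical, not part of the wheel), a conforming handler can be as small as:

```python
# Hypothetical sketch: the smallest possible conforming handler. It satisfies the
# CompletionHandler protocol for ChatMessageMetadata but never calls the provider.
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
from mito_ai.completions.models import ChatMessageMetadata
from mito_ai.completions.providers import OpenAIProvider
from mito_ai.completions.message_history import GlobalMessageHistory


class CannedReplyHandler(CompletionHandler[ChatMessageMetadata]):
    """Illustrative handler that returns a fixed string instead of an AI completion."""

    @staticmethod
    async def get_completion(
        metadata: ChatMessageMetadata,
        provider: OpenAIProvider,
        message_history: GlobalMessageHistory,
        model: str
    ) -> str:
        # A real handler would build a prompt from `metadata` and call
        # provider.request_completions(...), as the handlers below do.
        return "No completion available."
```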
mito_ai/completions/completion_handlers/inline_completer_handler.py
@@ -0,0 +1,48 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List
+from openai.types.chat import ChatCompletionMessageParam
+from mito_ai.completions.models import InlineCompleterMetadata, MessageType
+from mito_ai.completions.prompt_builders.inline_completer_prompt import create_inline_prompt
+from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
+
+__all__ = ["get_inline_completion"]
+
+class InlineCompleterHandler(CompletionHandler[InlineCompleterMetadata]):
+    """Handler for inline completions."""
+
+    @staticmethod
+    async def get_completion(
+        metadata: InlineCompleterMetadata,
+        provider: OpenAIProvider,
+        message_history: GlobalMessageHistory,
+        model: str
+    ) -> str:
+        """Get an inline completion from the AI provider."""
+
+        # Create the prompt
+        prompt = create_inline_prompt(
+            metadata.prefix or '',
+            metadata.suffix or '',
+            metadata.variables or [],
+            metadata.files or []
+        )
+
+        # Each inline completer is independent and ephemeral. So we do not use the message history.
+        messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": prompt}]
+
+        # Get the completion
+        completion = await provider.request_completions(
+            messages=messages,
+            model=model,
+            message_type=MessageType.INLINE_COMPLETION,
+            thread_id=None
+        )
+
+        return completion
+
+# Use the static method directly
+get_inline_completion = InlineCompleterHandler.get_completion
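A hedged sketch of how a caller might await the `get_inline_completion` alias exported above. How the provider, message history, and metadata objects are constructed is not shown in this diff, so they are taken as given, and the model name is an arbitrary placeholder rather than a package default:

```python
# Hypothetical caller sketch, not code from the package. The provider, message
# history, and metadata are assumed to be constructed elsewhere; "gpt-4o-mini"
# is an arbitrary placeholder model name, not a package default.
from mito_ai.completions.completion_handlers.inline_completer_handler import get_inline_completion
from mito_ai.completions.models import InlineCompleterMetadata
from mito_ai.completions.providers import OpenAIProvider
from mito_ai.completions.message_history import GlobalMessageHistory


async def complete_cell_fragment(
    metadata: InlineCompleterMetadata,
    provider: OpenAIProvider,
    message_history: GlobalMessageHistory,
) -> str:
    # The handler ignores message_history (inline completions are ephemeral),
    # but the protocol signature still requires passing it.
    return await get_inline_completion(metadata, provider, message_history, model="gpt-4o-mini")
```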
mito_ai/completions/completion_handlers/smart_debug_handler.py
@@ -0,0 +1,160 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Union, AsyncGenerator, Callable
+
+from openai.types.chat import ChatCompletionMessageParam
+from mito_ai.completions.models import (
+    SmartDebugMetadata,
+    MessageType,
+    CompletionRequest,
+    CompletionStreamChunk,
+    CompletionReply,
+)
+from mito_ai.completions.prompt_builders.smart_debug_prompt import (
+    create_error_prompt,
+    remove_inner_thoughts_from_message,
+)
+from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
+from mito_ai.completions.completion_handlers.utils import append_chat_system_message
+
+__all__ = ["get_smart_debug_completion", "stream_smart_debug_completion"]
+
+
+class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
+    """Handler for smart debug completions."""
+
+    @staticmethod
+    async def get_completion(
+        metadata: SmartDebugMetadata,
+        provider: OpenAIProvider,
+        message_history: GlobalMessageHistory,
+        model: str
+    ) -> str:
+        """Get a smart debug completion from the AI provider."""
+
+        error_message = metadata.errorMessage
+        active_cell_code = metadata.activeCellCode
+        active_cell_id = metadata.activeCellId
+        variables = metadata.variables or []
+        files = metadata.files or []
+        thread_id = metadata.threadId
+
+        # Add the system message if it doesn't already exist
+        await append_chat_system_message(message_history, model, provider, thread_id)
+
+        # Create the prompt
+        prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
+        display_prompt = f"```python{active_cell_code or ''}```{error_message}"
+
+        # Add the prompt to the message history
+        new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
+        new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
+        await message_history.append_message(
+            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+        )
+
+        # Get the completion
+        completion = await provider.request_completions(
+            messages=message_history.get_ai_optimized_history(thread_id),
+            model=model,
+            message_type=MessageType.SMART_DEBUG,
+            user_input=error_message,
+            thread_id=thread_id
+        )
+
+        # Process the completion to remove inner thoughts
+        display_completion = remove_inner_thoughts_from_message(completion)
+
+        # Add the response to message history
+        ai_response_message: ChatCompletionMessageParam = {
+            "role": "assistant",
+            "content": completion,
+        }
+        display_response_message: ChatCompletionMessageParam = {
+            "role": "assistant",
+            "content": display_completion,
+        }
+        await message_history.append_message(
+            ai_response_message, display_response_message, model, provider, thread_id
+        )
+
+        return display_completion
+
+    @staticmethod
+    async def stream_completion(
+        metadata: SmartDebugMetadata,
+        provider: OpenAIProvider,
+        message_history: GlobalMessageHistory,
+        message_id: str,
+        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
+        model: str
+    ) -> str:
+        """Stream smart debug completions from the AI provider.
+
+        Args:
+            metadata: The metadata for the smart debug completion request.
+            provider: The AI provider to use.
+            message_history: The message history for this conversation.
+            message_id: The ID of the message being processed.
+            reply_fn: Function to call with each chunk for streaming replies.
+
+        Returns:
+            The accumulated response string.
+        """
+        error_message = metadata.errorMessage
+        active_cell_code = metadata.activeCellCode
+        active_cell_id = metadata.activeCellId
+        variables = metadata.variables or []
+        files = metadata.files or []
+        thread_id = metadata.threadId
+
+        # Add the system message if it doesn't already exist
+        await append_chat_system_message(message_history, model, provider, thread_id)
+
+        # Create the prompt
+        prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
+        display_prompt = f"```python{active_cell_code or ''}```{error_message}"
+
+        # Add the prompt to the message history
+        new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
+        new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
+        await message_history.append_message(
+            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+        )
+
+        # Stream the completions using the provider's stream method
+        accumulated_response = await provider.stream_completions(
+            message_type=MessageType.SMART_DEBUG,
+            messages=message_history.get_ai_optimized_history(thread_id),
+            model=model,
+            message_id=message_id,
+            reply_fn=reply_fn,
+            user_input=error_message,
+            thread_id=thread_id
+        )
+
+        # Process the completion to remove inner thoughts
+        display_completion = remove_inner_thoughts_from_message(accumulated_response)
+
+        # Save the accumulated response to message history
+        ai_response_message: ChatCompletionMessageParam = {
+            "role": "assistant",
+            "content": accumulated_response,
+        }
+        display_response_message: ChatCompletionMessageParam = {
+            "role": "assistant",
+            "content": display_completion,
+        }
+        await message_history.append_message(
+            ai_response_message, display_response_message, model, provider, thread_id
+        )
+
+        return display_completion
+
+
+# Use the static methods directly
+get_smart_debug_completion = SmartDebugHandler.get_completion
+stream_smart_debug_completion = SmartDebugHandler.stream_completion
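`stream_completion` pushes each streamed piece through `reply_fn`, which is typed `Callable[[Union[CompletionReply, CompletionStreamChunk]], None]`. A minimal sketch of such a callback, assuming nothing beyond that signature (the real extension presumably forwards chunks over its websocket layer, which is outside this diff):

```python
# Minimal sketch of a reply_fn callback for SmartDebugHandler.stream_completion.
# It relies only on the Callable[[Union[CompletionReply, CompletionStreamChunk]], None]
# type taken from the handler's signature; collecting chunks in a list stands in
# for whatever transport the real extension uses.
from typing import List, Union
from mito_ai.completions.models import CompletionReply, CompletionStreamChunk

received: List[Union[CompletionReply, CompletionStreamChunk]] = []

def collect_reply(chunk: Union[CompletionReply, CompletionStreamChunk]) -> None:
    # Record each streamed reply/chunk; stream_smart_debug_completion still
    # returns the accumulated (inner-thought-stripped) string separately.
    received.append(chunk)
```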
mito_ai/completions/completion_handlers/utils.py
@@ -0,0 +1,147 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+import base64
+from typing import Optional, Union, List, Dict, Any, cast
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.completions.models import ThreadID
+from mito_ai.completions.providers import OpenAIProvider
+from openai.types.chat import ChatCompletionMessageParam
+from mito_ai.completions.prompt_builders.chat_system_message import (
+    create_chat_system_message_prompt,
+)
+from mito_ai.completions.prompt_builders.agent_system_message import (
+    create_agent_system_message_prompt,
+)
+
+
+async def append_chat_system_message(
+    message_history: GlobalMessageHistory,
+    model: str,
+    provider: OpenAIProvider,
+    thread_id: ThreadID,
+) -> None:
+
+    # If the system message already exists, do nothing
+    if any(
+        msg["role"] == "system"
+        for msg in message_history.get_ai_optimized_history(thread_id)
+    ):
+        return
+
+    system_message_prompt = create_chat_system_message_prompt()
+
+    system_message: ChatCompletionMessageParam = {
+        "role": "system",
+        "content": system_message_prompt,
+    }
+
+    await message_history.append_message(
+        ai_optimized_message=system_message,
+        display_message=system_message,
+        model=model,
+        llm_provider=provider,
+        thread_id=thread_id,
+    )
+
+
+async def append_agent_system_message(
+    message_history: GlobalMessageHistory,
+    model: str,
+    provider: OpenAIProvider,
+    thread_id: ThreadID,
+    isChromeBrowser: bool,
+) -> None:
+
+    # If the system message already exists, do nothing
+    if any(
+        msg["role"] == "system"
+        for msg in message_history.get_ai_optimized_history(thread_id)
+    ):
+        return
+
+    system_message_prompt = create_agent_system_message_prompt(isChromeBrowser)
+
+    system_message: ChatCompletionMessageParam = {
+        "role": "system",
+        "content": system_message_prompt,
+    }
+
+    await message_history.append_message(
+        ai_optimized_message=system_message,
+        display_message=system_message,
+        model=model,
+        llm_provider=provider,
+        thread_id=thread_id,
+    )
+
+
+def extract_and_encode_images_from_additional_context(
+    additional_context: Optional[List[Dict[str, str]]],
+) -> List[str]:
+    encoded_images = []
+
+    for context in additional_context or []:
+        if context["type"].startswith("image/"):
+            try:
+                with open(context["value"], "rb") as image_file:
+                    image_data = image_file.read()
+                    base64_encoded = base64.b64encode(image_data).decode("utf-8")
+                    encoded_images.append(f"data:{context['type']};base64,{base64_encoded}")
+            except (FileNotFoundError, IOError) as e:
+                print(f"Error reading image file {context['value']}: {e}")
+                continue
+
+    return encoded_images
+
+
+def create_ai_optimized_message(
+    text: str,
+    base64EncodedActiveCellOutput: Optional[str] = None,
+    additional_context: Optional[List[Dict[str, str]]] = None,
+) -> ChatCompletionMessageParam:
+
+    message_content: Union[str, List[Dict[str, Any]]]
+    encoded_images = extract_and_encode_images_from_additional_context(
+        additional_context
+    )
+
+    has_uploaded_image = len(encoded_images) > 0
+    has_active_cell_output = (
+        base64EncodedActiveCellOutput is not None
+        and base64EncodedActiveCellOutput != ""
+    )
+
+    if has_uploaded_image or has_active_cell_output:
+        message_content = [
+            {
+                "type": "text",
+                "text": text,
+            }
+        ]
+
+        for img in encoded_images:
+            message_content.append(
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": img
+                    },
+                }
+            )
+
+        if has_active_cell_output:
+            message_content.append(
+                {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"data:image/png;base64,{base64EncodedActiveCellOutput}"
+                    },
+                }
+            )
+    else:
+        message_content = text
+
+    return cast(
+        ChatCompletionMessageParam, {"role": "user", "content": message_content}
+    )
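A usage sketch for `create_ai_optimized_message`, with placeholder prompt text and a truncated placeholder base64 payload: with no image input the message content stays a plain string, while a non-empty `base64EncodedActiveCellOutput` switches it to the multimodal list form with an added `image_url` part.

```python
# Usage sketch for create_ai_optimized_message. The prompt text and the truncated
# base64 payload are illustrative placeholders, not values used by the package.
from mito_ai.completions.completion_handlers.utils import create_ai_optimized_message

# No images: the message content is a plain string.
text_only = create_ai_optimized_message("Explain what this cell does.")
assert isinstance(text_only["content"], str)

# With an active cell output screenshot: the content becomes a list containing a
# text part plus an image_url part that wraps the output as a data: URL.
with_output = create_ai_optimized_message(
    "Why does this chart look wrong?",
    base64EncodedActiveCellOutput="iVBORw0KGgoAAAANSUhEUg...",  # truncated placeholder PNG
)
assert isinstance(with_output["content"], list)
```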