mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +49 -9
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +142 -67
- mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/{app_builder → app_deploy}/models.py +35 -22
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
- mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
- mito_ai/completions/completion_handlers/utils.py +99 -37
- mito_ai/completions/handlers.py +57 -20
- mito_ai/completions/message_history.py +9 -1
- mito_ai/completions/models.py +31 -7
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
- mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
- mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
- mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
- mito_ai/completions/prompt_builders/utils.py +72 -10
- mito_ai/completions/providers.py +81 -47
- mito_ai/constants.py +25 -24
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +44 -48
- mito_ai/log/handlers.py +10 -3
- mito_ai/log/urls.py +3 -3
- mito_ai/openai_client.py +30 -44
- mito_ai/path_utils.py +70 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
- mito_ai/tests/message_history/test_message_history_utils.py +103 -23
- mito_ai/tests/open_ai_utils_test.py +18 -22
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +2 -6
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +31 -3
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/test_anthropic_utils.py +6 -6
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/anthropic_utils.py +55 -121
- mito_ai/utils/create.py +17 -1
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +39 -94
- mito_ai/utils/message_history_utils.py +7 -4
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +38 -155
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/server_limits.py +1 -1
- mito_ai/utils/telemetry_utils.py +137 -5
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
- mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
- mito_ai-0.1.49.dist-info/RECORD +205 -0
- mito_ai/app_builder/handlers.py +0 -218
- mito_ai/tests/providers_test.py +0 -438
- mito_ai/tests/test_anthropic_client.py +0 -270
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
- mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
- mito_ai-0.1.33.dist-info/RECORD +0 -134
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
mito_ai/app_builder/handlers.py
DELETED
|
@@ -1,218 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Saga Inc.
|
|
2
|
-
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
-
|
|
4
|
-
import os
|
|
5
|
-
import time
|
|
6
|
-
import logging
|
|
7
|
-
from typing import Any, Union
|
|
8
|
-
import zipfile
|
|
9
|
-
import tempfile
|
|
10
|
-
from mito_ai.utils.create import initialize_user
|
|
11
|
-
from mito_ai.utils.websocket_base import BaseWebSocketHandler
|
|
12
|
-
from mito_ai.app_builder.models import (
|
|
13
|
-
BuildAppReply,
|
|
14
|
-
AppBuilderError,
|
|
15
|
-
ErrorMessage,
|
|
16
|
-
MessageType
|
|
17
|
-
)
|
|
18
|
-
from mito_ai.logger import get_logger
|
|
19
|
-
import requests
|
|
20
|
-
|
|
21
|
-
# API endpoint for getting pre-signed URL
|
|
22
|
-
API_BASE_URL = "https://fr12uvtfy5.execute-api.us-east-1.amazonaws.com"
|
|
23
|
-
|
|
24
|
-
class AppBuilderHandler(BaseWebSocketHandler):
|
|
25
|
-
"""Handler for app building requests."""
|
|
26
|
-
|
|
27
|
-
def initialize(self) -> None:
|
|
28
|
-
"""Initialize the WebSocket handler."""
|
|
29
|
-
super().initialize()
|
|
30
|
-
self.log.debug("Initializing app builder websocket connection %s", self.request.path)
|
|
31
|
-
|
|
32
|
-
@property
|
|
33
|
-
def log(self) -> logging.Logger:
|
|
34
|
-
"""Use Mito AI logger."""
|
|
35
|
-
return get_logger()
|
|
36
|
-
|
|
37
|
-
async def get(self, *args: Any, **kwargs: Any) -> None:
|
|
38
|
-
"""Get an event to open a socket or check service availability."""
|
|
39
|
-
# Check if this is just a service availability check
|
|
40
|
-
if self.get_query_argument('check_availability', None) == 'true':
|
|
41
|
-
self.set_status(200)
|
|
42
|
-
self.finish()
|
|
43
|
-
return
|
|
44
|
-
|
|
45
|
-
await super().pre_get() # Authenticate and authorize
|
|
46
|
-
initialize_user() # Initialize user directory structure
|
|
47
|
-
|
|
48
|
-
reply = super().get(*args, **kwargs)
|
|
49
|
-
if reply is not None:
|
|
50
|
-
await reply
|
|
51
|
-
|
|
52
|
-
async def on_message(self, message: Union[str, bytes]) -> None:
|
|
53
|
-
"""Handle incoming messages on the WebSocket.
|
|
54
|
-
|
|
55
|
-
Args:
|
|
56
|
-
message: The message received on the WebSocket.
|
|
57
|
-
"""
|
|
58
|
-
start = time.time()
|
|
59
|
-
|
|
60
|
-
# Convert bytes to string if needed
|
|
61
|
-
if isinstance(message, bytes):
|
|
62
|
-
message = message.decode('utf-8')
|
|
63
|
-
|
|
64
|
-
self.log.debug("App builder message received: %s", message)
|
|
65
|
-
|
|
66
|
-
try:
|
|
67
|
-
parsed_message = self.parse_message(message)
|
|
68
|
-
message_type = parsed_message.get('type')
|
|
69
|
-
|
|
70
|
-
if message_type == MessageType.BUILD_APP.value:
|
|
71
|
-
# Handle build app request
|
|
72
|
-
await self._handle_build_app(parsed_message)
|
|
73
|
-
else:
|
|
74
|
-
self.log.error(f"Unknown message type: {message_type}")
|
|
75
|
-
error = AppBuilderError(
|
|
76
|
-
error_type="InvalidRequest",
|
|
77
|
-
title=f"Unknown message type: {message_type}"
|
|
78
|
-
)
|
|
79
|
-
self.reply(ErrorMessage(**error.__dict__))
|
|
80
|
-
|
|
81
|
-
except ValueError as e:
|
|
82
|
-
self.log.error("Invalid app builder request", exc_info=e)
|
|
83
|
-
error = AppBuilderError.from_exception(e)
|
|
84
|
-
self.reply(ErrorMessage(**error.__dict__))
|
|
85
|
-
except Exception as e:
|
|
86
|
-
self.log.error("Error handling app builder message", exc_info=e)
|
|
87
|
-
error = AppBuilderError.from_exception(
|
|
88
|
-
e,
|
|
89
|
-
hint="An error occurred while building the app. Please check the logs for details."
|
|
90
|
-
)
|
|
91
|
-
self.reply(ErrorMessage(**error.__dict__))
|
|
92
|
-
|
|
93
|
-
latency_ms = round((time.time() - start) * 1000)
|
|
94
|
-
self.log.info(f"App builder handler processed in {latency_ms} ms.")
|
|
95
|
-
|
|
96
|
-
async def _handle_build_app(self, message: dict) -> None:
|
|
97
|
-
"""Handle a build app request.
|
|
98
|
-
|
|
99
|
-
Args:
|
|
100
|
-
message: The parsed message.
|
|
101
|
-
"""
|
|
102
|
-
message_id = message.get('message_id', '') # Default to empty string if not present
|
|
103
|
-
app_path = message.get('path')
|
|
104
|
-
|
|
105
|
-
if not message_id:
|
|
106
|
-
self.log.error("Missing message_id in request")
|
|
107
|
-
return
|
|
108
|
-
|
|
109
|
-
if not app_path:
|
|
110
|
-
error = AppBuilderError(
|
|
111
|
-
error_type="InvalidRequest",
|
|
112
|
-
title="Missing 'path' parameter"
|
|
113
|
-
)
|
|
114
|
-
self.reply(BuildAppReply(
|
|
115
|
-
parent_id=message_id,
|
|
116
|
-
url="",
|
|
117
|
-
error=error
|
|
118
|
-
))
|
|
119
|
-
return
|
|
120
|
-
|
|
121
|
-
try:
|
|
122
|
-
# This is a placeholder for the actual app building logic
|
|
123
|
-
# In a real implementation, this would deploy the app to a hosting service
|
|
124
|
-
# and return the URL
|
|
125
|
-
deploy_url = await self._deploy_app(app_path)
|
|
126
|
-
|
|
127
|
-
# Send the response
|
|
128
|
-
self.reply(BuildAppReply(
|
|
129
|
-
parent_id=message_id,
|
|
130
|
-
url=deploy_url
|
|
131
|
-
))
|
|
132
|
-
|
|
133
|
-
except Exception as e:
|
|
134
|
-
self.log.error(f"Error building app: {e}", exc_info=e)
|
|
135
|
-
error = AppBuilderError.from_exception(e)
|
|
136
|
-
self.reply(BuildAppReply(
|
|
137
|
-
parent_id=message_id,
|
|
138
|
-
url="",
|
|
139
|
-
error=error
|
|
140
|
-
))
|
|
141
|
-
|
|
142
|
-
async def _deploy_app(self, app_path: str) -> str:
|
|
143
|
-
"""Deploy the app using pre-signed URLs.
|
|
144
|
-
|
|
145
|
-
Args:
|
|
146
|
-
app_path: Path to the app file.
|
|
147
|
-
|
|
148
|
-
Returns:
|
|
149
|
-
The URL of the deployed app.
|
|
150
|
-
"""
|
|
151
|
-
# Get app name from the path
|
|
152
|
-
app_name = os.path.basename(app_path).split('.')[0]
|
|
153
|
-
self.log.info(f"Deploying app: {app_name} from path: {app_path}")
|
|
154
|
-
|
|
155
|
-
try:
|
|
156
|
-
# Step 1: Get pre-signed URL from API
|
|
157
|
-
self.log.info("Getting pre-signed upload URL...")
|
|
158
|
-
url_response = requests.get(f"{API_BASE_URL}/get-upload-url?app_name={app_name}")
|
|
159
|
-
url_response.raise_for_status()
|
|
160
|
-
|
|
161
|
-
url_data = url_response.json()
|
|
162
|
-
presigned_url = url_data['upload_url']
|
|
163
|
-
expected_app_url = url_data['expected_app_url']
|
|
164
|
-
|
|
165
|
-
self.log.info(f"Received pre-signed URL. App will be available at: {expected_app_url}")
|
|
166
|
-
|
|
167
|
-
# Step 2: Create a zip file of the app.
|
|
168
|
-
temp_zip_path = None
|
|
169
|
-
try:
|
|
170
|
-
# Create temp file and close it before writing to avoid file handle conflicts
|
|
171
|
-
with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as temp_zip:
|
|
172
|
-
temp_zip_path = temp_zip.name
|
|
173
|
-
|
|
174
|
-
self.log.info("Zipping application files...")
|
|
175
|
-
with zipfile.ZipFile(temp_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
|
|
176
|
-
for root, _, files in os.walk(app_path):
|
|
177
|
-
for file in files:
|
|
178
|
-
file_path = os.path.join(root, file)
|
|
179
|
-
zipf.write(file_path, arcname=os.path.relpath(file_path, app_path))
|
|
180
|
-
|
|
181
|
-
upload_response = await self._upload_app_to_s3(temp_zip_path, presigned_url)
|
|
182
|
-
except Exception as e:
|
|
183
|
-
self.log.error(f"Error zipping app: {e}")
|
|
184
|
-
raise
|
|
185
|
-
finally:
|
|
186
|
-
# Clean up
|
|
187
|
-
if temp_zip_path is not None:
|
|
188
|
-
os.remove(temp_zip_path)
|
|
189
|
-
|
|
190
|
-
self.log.info(f"Upload successful! Status code: {upload_response.status_code}")
|
|
191
|
-
|
|
192
|
-
self.log.info(f"Deployment initiated. App will be available at: {expected_app_url}")
|
|
193
|
-
return expected_app_url # type: ignore
|
|
194
|
-
|
|
195
|
-
except requests.exceptions.RequestException as e:
|
|
196
|
-
self.log.error(f"Error during API request: {e}")
|
|
197
|
-
if hasattr(e, 'response') and e.response is not None:
|
|
198
|
-
try:
|
|
199
|
-
error_detail = e.response.json()
|
|
200
|
-
self.log.error(f"Server error details: {error_detail}")
|
|
201
|
-
except:
|
|
202
|
-
self.log.error(f"Server response: {e.response.text}")
|
|
203
|
-
raise Exception(f"Deployment failed: {str(e)}")
|
|
204
|
-
except Exception as e:
|
|
205
|
-
self.log.error(f"Error during deployment: {str(e)}")
|
|
206
|
-
raise
|
|
207
|
-
|
|
208
|
-
async def _upload_app_to_s3(self, app_path: str, presigned_url: str) -> requests.Response:
|
|
209
|
-
"""Upload the app to S3 using the presigned URL."""
|
|
210
|
-
with open(app_path, 'rb') as file_data:
|
|
211
|
-
upload_response = requests.put(
|
|
212
|
-
presigned_url,
|
|
213
|
-
data=file_data,
|
|
214
|
-
headers={'Content-Type': 'application/zip'}
|
|
215
|
-
)
|
|
216
|
-
upload_response.raise_for_status()
|
|
217
|
-
|
|
218
|
-
return upload_response
|
mito_ai/tests/providers_test.py
DELETED
|
@@ -1,438 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Saga Inc.
|
|
2
|
-
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
-
|
|
4
|
-
from __future__ import annotations
|
|
5
|
-
from datetime import datetime
|
|
6
|
-
from typing import Any, List
|
|
7
|
-
from unittest.mock import patch, MagicMock, AsyncMock
|
|
8
|
-
|
|
9
|
-
import pytest
|
|
10
|
-
from traitlets.config import Config
|
|
11
|
-
from mito_ai.completions.providers import OpenAIProvider
|
|
12
|
-
from mito_ai.completions.models import (
|
|
13
|
-
MessageType,
|
|
14
|
-
AICapabilities,
|
|
15
|
-
CompletionReply
|
|
16
|
-
)
|
|
17
|
-
from mito_ai.completions.providers import OpenAIProvider
|
|
18
|
-
from mito_ai.completions.models import MessageType, AICapabilities
|
|
19
|
-
from mito_ai.utils.server_limits import OS_MONTHLY_AI_COMPLETIONS_LIMIT
|
|
20
|
-
from openai.types.chat import ChatCompletionMessageParam
|
|
21
|
-
|
|
22
|
-
REALLY_OLD_DATE = "2020-01-01"
|
|
23
|
-
TODAY = datetime.now().strftime("%Y-%m-%d")
|
|
24
|
-
FAKE_API_KEY = "sk-1234567890"
|
|
25
|
-
|
|
26
|
-
@pytest.fixture
|
|
27
|
-
def provider_config() -> Config:
|
|
28
|
-
"""Create a proper Config object for the OpenAIProvider."""
|
|
29
|
-
config = Config()
|
|
30
|
-
config.OpenAIProvider = Config()
|
|
31
|
-
config.OpenAIClient = Config()
|
|
32
|
-
return config
|
|
33
|
-
|
|
34
|
-
@pytest.fixture(autouse=True)
|
|
35
|
-
def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
|
|
36
|
-
for var in [
|
|
37
|
-
"OPENAI_API_KEY", "CLAUDE_API_KEY",
|
|
38
|
-
"GEMINI_API_KEY", "OLLAMA_MODEL",
|
|
39
|
-
"AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_MODEL"
|
|
40
|
-
]:
|
|
41
|
-
monkeypatch.delenv(var, raising=False)
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
def patch_server_limits(is_pro: bool = False, completion_count: int = 1, first_date: str = TODAY) -> Any:
|
|
45
|
-
return patch.multiple(
|
|
46
|
-
"mito_ai.utils.server_limits",
|
|
47
|
-
get_chat_completion_count=MagicMock(return_value=completion_count),
|
|
48
|
-
get_first_completion_date=MagicMock(return_value=first_date),
|
|
49
|
-
is_pro=MagicMock(return_value=is_pro),
|
|
50
|
-
check_mito_server_quota=MagicMock(return_value=None)
|
|
51
|
-
)
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
def patch_openai_model_list() -> Any:
|
|
55
|
-
mock_openai_instance = MagicMock()
|
|
56
|
-
mock_openai_instance.models.list.return_value = [MagicMock(id="gpt-4o-mini")]
|
|
57
|
-
|
|
58
|
-
# Patch the constructor call to return your mock instance
|
|
59
|
-
return patch("openai.OpenAI", return_value=mock_openai_instance)
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
def mock_openai_client() -> Any:
|
|
63
|
-
"""Mock the OpenAI client with user key capabilities."""
|
|
64
|
-
mock_client = MagicMock()
|
|
65
|
-
mock_client.capabilities = AICapabilities(
|
|
66
|
-
configuration={"model": "gpt-4o-mini"},
|
|
67
|
-
provider="OpenAI with user key",
|
|
68
|
-
type="ai_capabilities"
|
|
69
|
-
)
|
|
70
|
-
mock_client.key_type = "user"
|
|
71
|
-
mock_client.request_completions = AsyncMock(return_value="Test completion")
|
|
72
|
-
mock_client.stream_completions = AsyncMock(return_value="Test completion")
|
|
73
|
-
return patch("mito_ai.completions.providers.OpenAIClient", return_value=mock_client)
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
def mock_gemini_client() -> Any:
|
|
77
|
-
"""Mock the Gemini client capabilities."""
|
|
78
|
-
mock_client = MagicMock()
|
|
79
|
-
mock_client.capabilities = AICapabilities(
|
|
80
|
-
configuration={"model": "gemini-2-pro"},
|
|
81
|
-
provider="Gemini",
|
|
82
|
-
type="ai_capabilities"
|
|
83
|
-
)
|
|
84
|
-
mock_client.key_type = "gemini"
|
|
85
|
-
mock_client.request_completions = AsyncMock(return_value="Test completion")
|
|
86
|
-
mock_client.stream_completions = AsyncMock(return_value="Test completion")
|
|
87
|
-
return patch("mito_ai.completions.providers.GeminiClient", return_value=mock_client)
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
def mock_azure_openai_client() -> Any:
|
|
91
|
-
"""Mock the Azure OpenAI client capabilities."""
|
|
92
|
-
mock_client = MagicMock()
|
|
93
|
-
mock_client.capabilities = AICapabilities(
|
|
94
|
-
configuration={"model": "gpt-4o"},
|
|
95
|
-
provider="Azure OpenAI",
|
|
96
|
-
type="ai_capabilities"
|
|
97
|
-
)
|
|
98
|
-
mock_client.key_type = "azure"
|
|
99
|
-
mock_client.request_completions = AsyncMock(return_value="Test completion")
|
|
100
|
-
mock_client.stream_completions = AsyncMock(return_value="Test completion")
|
|
101
|
-
return patch("mito_ai.completions.providers.OpenAIClient", return_value=mock_client)
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
def mock_claude_client() -> Any:
|
|
106
|
-
"""Mock the Claude client capabilities."""
|
|
107
|
-
mock_client = MagicMock()
|
|
108
|
-
mock_client.capabilities = AICapabilities(
|
|
109
|
-
configuration={"model": "claude-3-opus-20240229"},
|
|
110
|
-
provider="Claude",
|
|
111
|
-
type="ai_capabilities"
|
|
112
|
-
)
|
|
113
|
-
mock_client.key_type = "claude"
|
|
114
|
-
mock_client.request_completions = AsyncMock(return_value="Test completion")
|
|
115
|
-
mock_client.stream_completions = AsyncMock(return_value="Test completion")
|
|
116
|
-
mock_client.stream_response = AsyncMock(return_value="Test completion")
|
|
117
|
-
return patch("mito_ai.completions.providers.AnthropicClient", return_value=mock_client)
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
def test_os_user_openai_key_set_below_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
121
|
-
monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
|
|
122
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
|
|
123
|
-
|
|
124
|
-
with (
|
|
125
|
-
patch_server_limits(is_pro=False, completion_count=1),
|
|
126
|
-
mock_openai_client()
|
|
127
|
-
):
|
|
128
|
-
llm = OpenAIProvider(config=provider_config)
|
|
129
|
-
capabilities = llm.capabilities
|
|
130
|
-
assert "user key" in capabilities.provider
|
|
131
|
-
assert llm.last_error is None
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
def test_os_user_openai_key_set_above_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
135
|
-
monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
|
|
136
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
|
|
137
|
-
|
|
138
|
-
with (
|
|
139
|
-
patch_server_limits(is_pro=False, completion_count=OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1),
|
|
140
|
-
mock_openai_client()
|
|
141
|
-
):
|
|
142
|
-
llm = OpenAIProvider(config=provider_config)
|
|
143
|
-
capabilities = llm.capabilities
|
|
144
|
-
assert "user key" in capabilities.provider
|
|
145
|
-
assert llm.last_error is None
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
def test_pro_user_openai_key_set_below_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
149
|
-
monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
|
|
150
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
|
|
151
|
-
|
|
152
|
-
with (
|
|
153
|
-
patch_server_limits(is_pro=True, completion_count=1),
|
|
154
|
-
mock_openai_client()
|
|
155
|
-
):
|
|
156
|
-
llm = OpenAIProvider(config=provider_config)
|
|
157
|
-
capabilities = llm.capabilities
|
|
158
|
-
assert "user key" in capabilities.provider
|
|
159
|
-
assert llm.last_error is None
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
def test_pro_user_openai_key_set_above_limit(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
163
|
-
monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
|
|
164
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
|
|
165
|
-
|
|
166
|
-
with (
|
|
167
|
-
patch_server_limits(is_pro=True, completion_count=OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1),
|
|
168
|
-
mock_openai_client()
|
|
169
|
-
):
|
|
170
|
-
llm = OpenAIProvider(config=provider_config)
|
|
171
|
-
capabilities = llm.capabilities
|
|
172
|
-
assert "user key" in capabilities.provider
|
|
173
|
-
assert llm.last_error is None
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
def test_gemini_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
177
|
-
monkeypatch.setenv("GEMINI_API_KEY", "gemini-key")
|
|
178
|
-
monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", "gemini-key")
|
|
179
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
|
|
180
|
-
|
|
181
|
-
with mock_gemini_client():
|
|
182
|
-
llm = OpenAIProvider(config=provider_config)
|
|
183
|
-
capabilities = llm.capabilities
|
|
184
|
-
assert capabilities.provider == "Gemini"
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
def test_azure_openai_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
188
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: True)
|
|
189
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: True)
|
|
190
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_KEY", FAKE_API_KEY)
|
|
191
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_ENDPOINT", "https://example.com")
|
|
192
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_MODEL", "gpt-4o")
|
|
193
|
-
monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_VERSION", "2024-12-01-preview")
|
|
194
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
|
|
195
|
-
|
|
196
|
-
with mock_azure_openai_client():
|
|
197
|
-
llm = OpenAIProvider(config=provider_config)
|
|
198
|
-
capabilities = llm.capabilities
|
|
199
|
-
assert capabilities.provider == "Azure OpenAI"
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
def test_claude_provider(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
|
|
203
|
-
monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
|
|
204
|
-
monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
|
|
205
|
-
monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
|
|
206
|
-
|
|
207
|
-
with mock_claude_client():
|
|
208
|
-
llm = OpenAIProvider(config=provider_config)
|
|
209
|
-
capabilities = llm.capabilities
|
|
210
|
-
assert capabilities.provider == "Claude"
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
def test_provider_priority_order(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """Providers are selected in priority order: Azure OpenAI (enterprise) > OpenAI > Claude.

    Exercises three configurations in sequence against the same provider_config:
    1. Enterprise with Azure configured -> "Azure OpenAI"
    2. Non-enterprise with an OpenAI key -> "OpenAI with user key"
    3. No OpenAI key but a Claude key   -> "Claude" (outranks Gemini)

    Note: the original version of this test set the Claude key during initial
    setup and then immediately cleared it again before the first assertion;
    that redundant set/clear pair has been removed — the key is only set when
    stage 3 actually needs it.
    """
    # --- 1. Azure OpenAI wins when enterprise mode is fully configured. ---
    monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: True)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_ENDPOINT", "https://example.com")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_MODEL", "gpt-4o")
    monkeypatch.setattr("mito_ai.enterprise.utils.AZURE_OPENAI_API_VERSION", "2024-12-01-preview")
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    # Clear lower-priority providers so Azure OpenAI is unambiguously selected.
    monkeypatch.delenv("GEMINI_API_KEY", raising=False)
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
    monkeypatch.delenv("CLAUDE_API_KEY", raising=False)
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)

    with mock_azure_openai_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "Azure OpenAI"

    # --- 2. Without enterprise, a user-supplied OpenAI key has top priority. ---
    monkeypatch.setattr("mito_ai.enterprise.utils.is_enterprise", lambda: False)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
    with mock_openai_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "OpenAI with user key"

    # --- 3. Without an OpenAI key, Claude outranks Gemini. ---
    monkeypatch.delenv("OPENAI_API_KEY")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
    # Ensure the traitlets config doesn't carry an api_key either.
    provider_config.OpenAIProvider.api_key = None
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    with mock_claude_client():
        llm = OpenAIProvider(config=provider_config)
        assert llm.capabilities.provider == "Claude"
@pytest.mark.asyncio
async def test_completion_request(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """request_completions delegates to the underlying OpenAI client and returns its result."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    fake_client = MagicMock()
    fake_client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    fake_client.key_type = "user"
    fake_client.request_completions = AsyncMock(return_value="Test completion")
    fake_client.stream_completions = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=fake_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": "Test message"}]

        result = await llm.request_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model="gpt-4o-mini",
        )

    assert result == "Test completion"
    fake_client.request_completions.assert_called_once()
@pytest.mark.asyncio
async def test_stream_completion(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """stream_completions forwards chunks through reply_fn and returns the full text."""
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    fake_client = MagicMock()
    fake_client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    fake_client.key_type = "user"
    fake_client.request_completions = AsyncMock(return_value="Test completion")
    fake_client.stream_completions = AsyncMock(return_value="Test completion")

    # Collect every chunk the provider replies with; list.append is the reply_fn.
    received_chunks: List[CompletionReply] = []

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=fake_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": "Test message"}]

        result = await llm.stream_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model="gpt-4o-mini",
            message_id="test-id",
            thread_id="test-thread",
            reply_fn=received_chunks.append,
        )

    assert result == "Test completion"
    fake_client.stream_completions.assert_called_once()
    assert len(received_chunks) > 0
    assert isinstance(received_chunks[0], CompletionReply)
def test_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """last_error is None on a freshly constructed provider, even with a failing client."""
    monkeypatch.setenv("OPENAI_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "invalid-key")

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities",
    )
    failing_client.key_type = "user"
    # NOTE(review): the client is primed to raise, but this test never issues a
    # request — it only verifies the initial last_error state.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # No request has been made yet, so no error should be recorded.
        assert provider.last_error is None
@pytest.mark.asyncio
async def test_claude_completion_request(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """request_completions delegates to the Anthropic client when Claude is the provider."""
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    # No OpenAI key, so Claude is selected.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    fake_client = MagicMock()
    fake_client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    fake_client.key_type = "claude"
    fake_client.request_completions = AsyncMock(return_value="Test completion")
    fake_client.stream_completions = AsyncMock(return_value="Test completion")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=fake_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": "Test message"}]

        result = await llm.request_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model="claude-3-opus-20240229",
        )

    assert result == "Test completion"
    fake_client.request_completions.assert_called_once()
@pytest.mark.asyncio
async def test_claude_stream_completion(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """stream_completions routes through the Anthropic client's stream_response path."""
    monkeypatch.setenv("CLAUDE_API_KEY", "claude-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "claude-key")
    # No OpenAI key, so Claude is selected.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    fake_client = MagicMock()
    fake_client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    fake_client.key_type = "claude"
    fake_client.request_completions = AsyncMock(return_value="Test completion")
    fake_client.stream_completions = AsyncMock(return_value="Test completion")
    fake_client.stream_response = AsyncMock(return_value="Test completion")

    # Collect every chunk the provider replies with; list.append is the reply_fn.
    received_chunks: List[CompletionReply] = []

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=fake_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": "Test message"}]

        result = await llm.stream_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model="claude-3-opus-20240229",
            message_id="test-id",
            thread_id="test-thread",
            reply_fn=received_chunks.append,
        )

    assert result == "Test completion"
    # The Anthropic path streams via stream_response (not stream_completions).
    fake_client.stream_response.assert_called_once()
    assert len(received_chunks) > 0
    assert isinstance(received_chunks[0], CompletionReply)
def test_claude_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """last_error is None on a freshly constructed Claude-backed provider."""
    monkeypatch.setenv("CLAUDE_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "invalid-key")
    # No OpenAI key, so Claude is selected.
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities",
    )
    failing_client.key_type = "claude"
    # NOTE(review): the client is primed to raise, but this test never issues a
    # request — it only verifies the initial last_error state.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # No request has been made yet, so no error should be recorded.
        assert provider.last_error is None