mito-ai 0.1.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +114 -0
- mito_ai/_version.py +4 -0
- mito_ai/anthropic_client.py +334 -0
- mito_ai/app_deploy/__init__.py +6 -0
- mito_ai/app_deploy/app_deploy_utils.py +44 -0
- mito_ai/app_deploy/handlers.py +345 -0
- mito_ai/app_deploy/models.py +98 -0
- mito_ai/app_manager/__init__.py +4 -0
- mito_ai/app_manager/handlers.py +167 -0
- mito_ai/app_manager/models.py +71 -0
- mito_ai/app_manager/utils.py +24 -0
- mito_ai/auth/README.md +18 -0
- mito_ai/auth/__init__.py +6 -0
- mito_ai/auth/handlers.py +96 -0
- mito_ai/auth/urls.py +13 -0
- mito_ai/chat_history/handlers.py +63 -0
- mito_ai/chat_history/urls.py +32 -0
- mito_ai/completions/completion_handlers/__init__.py +3 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +59 -0
- mito_ai/completions/completion_handlers/agent_execution_handler.py +66 -0
- mito_ai/completions/completion_handlers/chat_completion_handler.py +141 -0
- mito_ai/completions/completion_handlers/code_explain_handler.py +113 -0
- mito_ai/completions/completion_handlers/completion_handler.py +42 -0
- mito_ai/completions/completion_handlers/inline_completer_handler.py +48 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +160 -0
- mito_ai/completions/completion_handlers/utils.py +147 -0
- mito_ai/completions/handlers.py +415 -0
- mito_ai/completions/message_history.py +401 -0
- mito_ai/completions/models.py +404 -0
- mito_ai/completions/prompt_builders/__init__.py +3 -0
- mito_ai/completions/prompt_builders/agent_execution_prompt.py +57 -0
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +160 -0
- mito_ai/completions/prompt_builders/agent_system_message.py +472 -0
- mito_ai/completions/prompt_builders/chat_name_prompt.py +15 -0
- mito_ai/completions/prompt_builders/chat_prompt.py +116 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +92 -0
- mito_ai/completions/prompt_builders/explain_code_prompt.py +32 -0
- mito_ai/completions/prompt_builders/inline_completer_prompt.py +197 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +170 -0
- mito_ai/completions/prompt_builders/smart_debug_prompt.py +199 -0
- mito_ai/completions/prompt_builders/utils.py +84 -0
- mito_ai/completions/providers.py +284 -0
- mito_ai/constants.py +63 -0
- mito_ai/db/__init__.py +3 -0
- mito_ai/db/crawlers/__init__.py +6 -0
- mito_ai/db/crawlers/base_crawler.py +61 -0
- mito_ai/db/crawlers/constants.py +43 -0
- mito_ai/db/crawlers/snowflake.py +71 -0
- mito_ai/db/handlers.py +168 -0
- mito_ai/db/models.py +31 -0
- mito_ai/db/urls.py +34 -0
- mito_ai/db/utils.py +185 -0
- mito_ai/docker/mssql/compose.yml +37 -0
- mito_ai/docker/mssql/init/setup.sql +21 -0
- mito_ai/docker/mysql/compose.yml +18 -0
- mito_ai/docker/mysql/init/setup.sql +13 -0
- mito_ai/docker/oracle/compose.yml +17 -0
- mito_ai/docker/oracle/init/setup.sql +20 -0
- mito_ai/docker/postgres/compose.yml +17 -0
- mito_ai/docker/postgres/init/setup.sql +13 -0
- mito_ai/enterprise/__init__.py +3 -0
- mito_ai/enterprise/utils.py +15 -0
- mito_ai/file_uploads/__init__.py +3 -0
- mito_ai/file_uploads/handlers.py +248 -0
- mito_ai/file_uploads/urls.py +21 -0
- mito_ai/gemini_client.py +232 -0
- mito_ai/log/handlers.py +38 -0
- mito_ai/log/urls.py +21 -0
- mito_ai/logger.py +37 -0
- mito_ai/openai_client.py +382 -0
- mito_ai/path_utils.py +70 -0
- mito_ai/rules/handlers.py +44 -0
- mito_ai/rules/urls.py +22 -0
- mito_ai/rules/utils.py +56 -0
- mito_ai/settings/handlers.py +41 -0
- mito_ai/settings/urls.py +20 -0
- mito_ai/settings/utils.py +42 -0
- mito_ai/streamlit_conversion/agent_utils.py +37 -0
- mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
- mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
- mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
- mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
- mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
- mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
- mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
- mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
- mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
- mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
- mito_ai/streamlit_preview/__init__.py +6 -0
- mito_ai/streamlit_preview/handlers.py +111 -0
- mito_ai/streamlit_preview/manager.py +152 -0
- mito_ai/streamlit_preview/urls.py +22 -0
- mito_ai/streamlit_preview/utils.py +29 -0
- mito_ai/tests/__init__.py +3 -0
- mito_ai/tests/chat_history/test_chat_history.py +211 -0
- mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
- mito_ai/tests/conftest.py +53 -0
- mito_ai/tests/create_agent_system_message_prompt_test.py +22 -0
- mito_ai/tests/data/prompt_lg.py +69 -0
- mito_ai/tests/data/prompt_sm.py +6 -0
- mito_ai/tests/data/prompt_xl.py +13 -0
- mito_ai/tests/data/stock_data.sqlite3 +0 -0
- mito_ai/tests/db/conftest.py +39 -0
- mito_ai/tests/db/connections_test.py +102 -0
- mito_ai/tests/db/mssql_test.py +29 -0
- mito_ai/tests/db/mysql_test.py +29 -0
- mito_ai/tests/db/oracle_test.py +29 -0
- mito_ai/tests/db/postgres_test.py +29 -0
- mito_ai/tests/db/schema_test.py +93 -0
- mito_ai/tests/db/sqlite_test.py +31 -0
- mito_ai/tests/db/test_db_constants.py +61 -0
- mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
- mito_ai/tests/file_uploads/__init__.py +2 -0
- mito_ai/tests/file_uploads/test_handlers.py +282 -0
- mito_ai/tests/message_history/test_generate_short_chat_name.py +120 -0
- mito_ai/tests/message_history/test_message_history_utils.py +469 -0
- mito_ai/tests/open_ai_utils_test.py +152 -0
- mito_ai/tests/performance_test.py +329 -0
- mito_ai/tests/providers/test_anthropic_client.py +447 -0
- mito_ai/tests/providers/test_azure.py +631 -0
- mito_ai/tests/providers/test_capabilities.py +120 -0
- mito_ai/tests/providers/test_gemini_client.py +195 -0
- mito_ai/tests/providers/test_mito_server_utils.py +448 -0
- mito_ai/tests/providers/test_model_resolution.py +130 -0
- mito_ai/tests/providers/test_openai_client.py +57 -0
- mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
- mito_ai/tests/providers/test_provider_limits.py +42 -0
- mito_ai/tests/providers/test_providers.py +382 -0
- mito_ai/tests/providers/test_retry_logic.py +389 -0
- mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
- mito_ai/tests/providers/utils.py +85 -0
- mito_ai/tests/rules/conftest.py +26 -0
- mito_ai/tests/rules/rules_test.py +117 -0
- mito_ai/tests/server_limits_test.py +406 -0
- mito_ai/tests/settings/conftest.py +26 -0
- mito_ai/tests/settings/settings_test.py +70 -0
- mito_ai/tests/settings/test_settings_constants.py +9 -0
- mito_ai/tests/streamlit_conversion/__init__.py +3 -0
- mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
- mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
- mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
- mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
- mito_ai/tests/test_constants.py +47 -0
- mito_ai/tests/test_telemetry.py +12 -0
- mito_ai/tests/user/__init__.py +2 -0
- mito_ai/tests/user/test_user.py +120 -0
- mito_ai/tests/utils/__init__.py +3 -0
- mito_ai/tests/utils/test_anthropic_utils.py +162 -0
- mito_ai/tests/utils/test_gemini_utils.py +98 -0
- mito_ai/tests/version_check_test.py +169 -0
- mito_ai/user/handlers.py +45 -0
- mito_ai/user/urls.py +21 -0
- mito_ai/utils/__init__.py +3 -0
- mito_ai/utils/anthropic_utils.py +168 -0
- mito_ai/utils/create.py +94 -0
- mito_ai/utils/db.py +74 -0
- mito_ai/utils/error_classes.py +42 -0
- mito_ai/utils/gemini_utils.py +133 -0
- mito_ai/utils/message_history_utils.py +87 -0
- mito_ai/utils/mito_server_utils.py +242 -0
- mito_ai/utils/open_ai_utils.py +200 -0
- mito_ai/utils/provider_utils.py +49 -0
- mito_ai/utils/schema.py +86 -0
- mito_ai/utils/server_limits.py +152 -0
- mito_ai/utils/telemetry_utils.py +480 -0
- mito_ai/utils/utils.py +89 -0
- mito_ai/utils/version_utils.py +94 -0
- mito_ai/utils/websocket_base.py +88 -0
- mito_ai/version_check.py +60 -0
- mito_ai-0.1.50.data/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +7 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/build_log.json +728 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/package.json +243 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +238 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +37 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +21602 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js +619 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.78d3ccb73e7ca1da3aae.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style.js +4 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +712 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2792 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +4859 -0
- mito_ai-0.1.50.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +1 -0
- mito_ai-0.1.50.dist-info/METADATA +221 -0
- mito_ai-0.1.50.dist-info/RECORD +205 -0
- mito_ai-0.1.50.dist-info/WHEEL +4 -0
- mito_ai-0.1.50.dist-info/entry_points.txt +2 -0
- mito_ai-0.1.50.dist-info/licenses/LICENSE +3 -0
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from mito_ai.utils.mito_server_utils import ProviderCompletionException
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class TestProviderCompletionException:
    """Unit tests for constructing ProviderCompletionException objects."""

    @pytest.mark.parametrize("error_message,provider_name,error_type,expected_title,expected_hint_contains", [
        pytest.param(
            "Something went wrong",
            "LLM Provider",
            "LLMProviderError",
            "LLM Provider Error: Something went wrong",
            "LLM Provider",
        ),
        pytest.param(
            "API key is invalid",
            "OpenAI",
            "AuthenticationError",
            "OpenAI Error: API key is invalid",
            "OpenAI",
        ),
        pytest.param(
            "There was an error accessing the Anthropic API: Error code: 529 - {'type': 'error', 'error': {'type': 'overloaded_error', 'message': 'Overloaded'}}",
            "Anthropic",
            "LLMProviderError",
            "Anthropic Error: There was an error accessing the Anthropic API: Error code: 529 - {'type': 'error', 'error': {'type': 'overloaded_error', 'message': 'Overloaded'}}",
            "Anthropic",
        ),
    ])
    def test_exception_initialization(
        self,
        error_message: str,
        provider_name: str,
        error_type: str,
        expected_title: str,
        expected_hint_contains: str
    ):
        """Explicit provider/type arguments must flow through to every attribute."""
        exc = ProviderCompletionException(
            error_message,
            provider_name=provider_name,
            error_type=error_type
        )

        # Raw fields are stored verbatim.
        assert exc.error_message == error_message
        assert exc.provider_name == provider_name
        assert exc.error_type == error_type
        # The user-facing title doubles as the exception's string form.
        assert exc.user_friendly_title == expected_title
        assert exc.args[0] == expected_title
        assert str(exc) == expected_title
        # The hint should at least mention the provider name.
        assert expected_hint_contains in exc.user_friendly_hint

    def test_default_initialization(self):
        """Omitting provider/type falls back to the generic LLM Provider defaults."""
        error_msg = "Something went wrong"
        exc = ProviderCompletionException(error_msg)

        assert exc.error_message == error_msg
        assert exc.provider_name == "LLM Provider"
        assert exc.error_type == "LLMProviderError"
        assert exc.user_friendly_title == "LLM Provider Error: Something went wrong"
        assert "LLM Provider" in exc.user_friendly_hint
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
6
|
+
from mito_ai.tests.providers.utils import mock_openai_client, patch_server_limits
|
|
7
|
+
from mito_ai.utils.server_limits import OS_MONTHLY_AI_COMPLETIONS_LIMIT
|
|
8
|
+
from traitlets.config import Config
|
|
9
|
+
|
|
10
|
+
FAKE_API_KEY = "sk-1234567890"
|
|
11
|
+
|
|
12
|
+
@pytest.fixture
def provider_config() -> Config:
    """Return a traitlets Config pre-seeded with empty provider sections."""
    cfg = Config()
    # Both sections must exist so the provider/client can read their settings.
    cfg.OpenAIClient = Config()
    cfg.OpenAIProvider = Config()
    return cfg
|
|
19
|
+
|
|
20
|
+
@pytest.mark.parametrize("is_pro,completion_count", [
    (False, 1),                                     # open-source user below the limit
    (False, OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1),   # open-source user above the limit
    (True, 1),                                      # pro user below the limit
    (True, OS_MONTHLY_AI_COMPLETIONS_LIMIT + 1),    # pro user above the limit
])
def test_openai_provider_with_limits(
    is_pro: bool,
    completion_count: int,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config) -> None:
    """With a user-supplied key, capabilities report the user key for every plan/usage combo."""
    # Present a user-owned OpenAI key both in the env and in the constants module.
    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)

    limits = patch_server_limits(is_pro=is_pro, completion_count=completion_count)
    with limits, mock_openai_client():
        provider = OpenAIProvider(config=provider_config)
        caps = provider.capabilities
        # A user key should be advertised regardless of plan or usage count.
        assert "user key" in caps.provider
        assert provider.last_error is None
|
|
@@ -0,0 +1,382 @@
|
|
|
1
|
+
# Copyright (c) Saga Inc.
|
|
2
|
+
# Distributed under the terms of the GNU Affero General Public License v3.0 License.
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import Any, List, Optional
|
|
7
|
+
from unittest.mock import patch, MagicMock, AsyncMock
|
|
8
|
+
|
|
9
|
+
from mito_ai.tests.providers.utils import mock_azure_openai_client, mock_openai_client, patch_server_limits
|
|
10
|
+
import pytest
|
|
11
|
+
from traitlets.config import Config
|
|
12
|
+
from mito_ai.completions.providers import OpenAIProvider
|
|
13
|
+
from mito_ai.completions.models import (
|
|
14
|
+
MessageType,
|
|
15
|
+
AICapabilities,
|
|
16
|
+
CompletionReply
|
|
17
|
+
)
|
|
18
|
+
from mito_ai.utils.server_limits import OS_MONTHLY_AI_COMPLETIONS_LIMIT
|
|
19
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
20
|
+
|
|
21
|
+
REALLY_OLD_DATE = "2020-01-01"
|
|
22
|
+
TODAY = datetime.now().strftime("%Y-%m-%d")
|
|
23
|
+
FAKE_API_KEY = "sk-1234567890"
|
|
24
|
+
|
|
25
|
+
@pytest.fixture
def provider_config() -> Config:
    """Build the Config object the OpenAIProvider expects, with empty sections."""
    base = Config()
    base.OpenAIProvider = Config()
    base.OpenAIClient = Config()
    return base
|
|
32
|
+
|
|
33
|
+
@pytest.fixture(autouse=True)
def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
    """Scrub every provider-selection environment variable before each test."""
    cleared = (
        "OPENAI_API_KEY",
        "CLAUDE_API_KEY",
        "GEMINI_API_KEY",
        "OLLAMA_MODEL",
        "AZURE_OPENAI_API_KEY",
        "AZURE_OPENAI_ENDPOINT",
        "AZURE_OPENAI_MODEL",
    )
    for name in cleared:
        # raising=False: absent variables are simply ignored.
        monkeypatch.delenv(name, raising=False)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# ====================
|
|
44
|
+
# TESTS
|
|
45
|
+
# ====================
|
|
46
|
+
|
|
47
|
+
# Each case declares: the env vars / constants that select a provider, the model
# string to request, the dotted path of the client class to replace with a mock,
# and the capability metadata the mock should report.
@pytest.mark.parametrize("provider_config_data", [
    {
        "name": "openai",
        "env_vars": {"OPENAI_API_KEY": FAKE_API_KEY},
        "constants": {"OPENAI_API_KEY": FAKE_API_KEY},
        "model": "gpt-4o-mini",
        "mock_patch": "mito_ai.completions.providers.OpenAIClient",
        "mock_method": "request_completions",
        "provider_name": "OpenAI with user key",
        "key_type": "user"
    },
    {
        "name": "claude",
        "env_vars": {"CLAUDE_API_KEY": "claude-key"},
        "constants": {"CLAUDE_API_KEY": "claude-key", "OPENAI_API_KEY": None},
        "model": "claude-3-opus-20240229",
        "mock_patch": "mito_ai.completions.providers.AnthropicClient",
        "mock_method": "request_completions",
        "provider_name": "Claude",
        "key_type": "claude"
    },
    {
        "name": "gemini",
        "env_vars": {"GEMINI_API_KEY": "gemini-key"},
        "constants": {"GEMINI_API_KEY": "gemini-key", "OPENAI_API_KEY": None},
        "model": "gemini-2.0-flash",
        "mock_patch": "mito_ai.completions.providers.GeminiClient",
        "mock_method": "request_completions",
        "provider_name": "Gemini",
        "key_type": "gemini"
    },
    {
        "name": "azure",
        "env_vars": {"AZURE_OPENAI_API_KEY": "azure-key"},
        "constants": {"AZURE_OPENAI_API_KEY": "azure-key", "OPENAI_API_KEY": None},
        "model": "gpt-4o",
        # NOTE(review): Azure reuses the OpenAIClient patch target — presumably the
        # provider routes Azure through the same client class; confirm in providers.py.
        "mock_patch": "mito_ai.completions.providers.OpenAIClient",
        "mock_method": "request_completions",
        "provider_name": "Azure OpenAI",
        "key_type": "azure"
    }
])
@pytest.mark.asyncio
async def test_completion_request(
    provider_config_data: dict,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config
) -> None:
    """Test completion requests for different providers.

    For each configured provider, install its API key, mock out its client
    class, issue one chat completion request, and verify the provider routes
    the call to the expected client method.
    """
    # Set up environment variables
    for env_var, value in provider_config_data["env_vars"].items():
        monkeypatch.setenv(env_var, value)

    # Set up constants (the providers module reads keys from mito_ai.constants,
    # so patching the env alone is not sufficient)
    for constant, value in provider_config_data["constants"].items():
        monkeypatch.setattr(f"mito_ai.constants.{constant}", value)

    # Create mock client reporting the expected capabilities/key type; both
    # completion entry points return a fixed sentinel string.
    mock_client = MagicMock()
    mock_client.capabilities = AICapabilities(
        configuration={"model": provider_config_data["model"]},
        provider=provider_config_data["provider_name"],
        type="ai_capabilities"
    )
    mock_client.key_type = provider_config_data["key_type"]
    mock_client.request_completions = AsyncMock(return_value="Test completion")
    mock_client.stream_completions = AsyncMock(return_value="Test completion")

    with patch(provider_config_data["mock_patch"], return_value=mock_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        completion = await llm.request_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model=provider_config_data["model"]
        )

        # The sentinel proves the mocked client (not a real API) produced the result.
        assert completion == "Test completion"
        getattr(mock_client, provider_config_data["mock_method"]).assert_called_once()
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
# Streaming counterpart of test_completion_request: same provider-selection
# matrix (minus Azure), but exercising stream_completions and the reply callback.
@pytest.mark.parametrize("provider_config_data", [
    {
        "name": "openai",
        "env_vars": {"OPENAI_API_KEY": FAKE_API_KEY},
        "constants": {"OPENAI_API_KEY": FAKE_API_KEY},
        "model": "gpt-4o-mini",
        "mock_patch": "mito_ai.completions.providers.OpenAIClient",
        "mock_method": "stream_completions",
        "provider_name": "OpenAI with user key",
        "key_type": "user"
    },
    {
        "name": "claude",
        "env_vars": {"CLAUDE_API_KEY": "claude-key"},
        "constants": {"CLAUDE_API_KEY": "claude-key", "OPENAI_API_KEY": None},
        "model": "claude-3-opus-20240229",
        "mock_patch": "mito_ai.completions.providers.AnthropicClient",
        "mock_method": "stream_completions",
        "provider_name": "Claude",
        "key_type": "claude"
    },
    {
        "name": "gemini",
        "env_vars": {"GEMINI_API_KEY": "gemini-key"},
        "constants": {"GEMINI_API_KEY": "gemini-key", "OPENAI_API_KEY": None},
        "model": "gemini-2.0-flash",
        "mock_patch": "mito_ai.completions.providers.GeminiClient",
        "mock_method": "stream_completions",
        "provider_name": "Gemini",
        "key_type": "gemini"
    },
])
@pytest.mark.asyncio
async def test_stream_completion_parameterized(
    provider_config_data: dict,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config
) -> None:
    """Test stream completions for different providers.

    Verifies that stream_completions dispatches to the mocked client's
    streaming method and that at least one CompletionReply chunk is pushed
    through the reply_fn callback.
    """
    # Set up environment variables
    for env_var, value in provider_config_data["env_vars"].items():
        monkeypatch.setenv(env_var, value)

    # Set up constants (provider selection also reads mito_ai.constants)
    for constant, value in provider_config_data["constants"].items():
        monkeypatch.setattr(f"mito_ai.constants.{constant}", value)

    # Create mock client covering every streaming entry point a provider may use.
    mock_client = MagicMock()
    mock_client.capabilities = AICapabilities(
        configuration={"model": provider_config_data["model"]},
        provider=provider_config_data["provider_name"],
        type="ai_capabilities"
    )
    mock_client.key_type = provider_config_data["key_type"]
    mock_client.request_completions = AsyncMock(return_value="Test completion")
    mock_client.stream_completions = AsyncMock(return_value="Test completion")
    mock_client.stream_response = AsyncMock(return_value="Test completion")  # For Claude

    with patch(provider_config_data["mock_patch"], return_value=mock_client):
        llm = OpenAIProvider(config=provider_config)
        messages: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        # Collect everything the provider pushes through the reply callback.
        reply_chunks = []
        def mock_reply(chunk):
            reply_chunks.append(chunk)

        completion = await llm.stream_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            model=provider_config_data["model"],
            message_id="test-id",
            thread_id="test-thread",
            reply_fn=mock_reply
        )

        assert completion == "Test completion"
        getattr(mock_client, provider_config_data["mock_method"]).assert_called_once()
        # The provider must have emitted at least one reply, typed as CompletionReply.
        assert len(reply_chunks) > 0
        assert isinstance(reply_chunks[0], CompletionReply)
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def test_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """A failing OpenAI client must not surface an error before any request is made."""
    # Install a deliberately bad key in both the env and the constants module.
    monkeypatch.setenv("OPENAI_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "invalid-key")

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "gpt-4o-mini"},
        provider="OpenAI with user key",
        type="ai_capabilities"
    )
    failing_client.key_type = "user"
    # Any completion attempt would blow up — but none is made here.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.OpenAIClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # Error should be None until a request is made
        assert provider.last_error is None
|
|
230
|
+
|
|
231
|
+
def test_claude_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
    """A failing Anthropic client must not surface an error before any request is made."""
    # Select Claude: bad Claude key present, OpenAI key absent.
    monkeypatch.setenv("CLAUDE_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", "invalid-key")
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)

    failing_client = MagicMock()
    failing_client.capabilities = AICapabilities(
        configuration={"model": "claude-3-opus-20240229"},
        provider="Claude",
        type="ai_capabilities"
    )
    failing_client.key_type = "claude"
    # Any completion attempt would blow up — but none is made here.
    failing_client.request_completions.side_effect = Exception("API error")

    with patch("mito_ai.completions.providers.AnthropicClient", return_value=failing_client):
        provider = OpenAIProvider(config=provider_config)
        # Error should be None until a request is made
        assert provider.last_error is None
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
# Mito Server Fallback Tests
|
|
251
|
+
# Each case names the model to request and the dotted path of the Mito-server
# completion function that should be called when no user API key is configured.
@pytest.mark.parametrize("mito_server_config", [
    {
        "name": "openai_fallback",
        "model": "gpt-4o-mini",
        "mock_function": "mito_ai.openai_client.get_ai_completion_from_mito_server",
        "provider_name": "Mito server",
        "key_type": "mito_server"
    },
    {
        "name": "claude_fallback",
        "model": "claude-3-opus-20240229",
        "mock_function": "mito_ai.anthropic_client.get_anthropic_completion_from_mito_server",
        "provider_name": "Claude",
        "key_type": "claude"
    },
    {
        "name": "gemini_fallback",
        "model": "gemini-2.0-flash",
        "mock_function": "mito_ai.gemini_client.get_gemini_completion_from_mito_server",
        "provider_name": "Gemini",
        "key_type": "gemini"
    },
])
@pytest.mark.asyncio
async def test_mito_server_fallback_completion_request(
    mito_server_config: dict,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config
) -> None:
    """Test that completion requests fallback to Mito server when no API keys are set."""
    # Clear all API keys to force Mito server fallback
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
    # Azure is configured via a helper rather than a single constant.
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
    provider_config.OpenAIProvider.api_key = None

    # Mock the appropriate Mito server function
    with patch(mito_server_config["mock_function"], new_callable=AsyncMock) as mock_mito_function:
        mock_mito_function.return_value = "Mito server response"

        messages: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        # Server-side quota checks must be stubbed for the fallback path.
        with patch_server_limits():
            llm = OpenAIProvider(config=provider_config)

            completion = await llm.request_completions(
                message_type=MessageType.CHAT,
                messages=messages,
                model=mito_server_config["model"]
            )

            # The sentinel proves the request went through the Mito server function.
            assert completion == "Mito server response"
            mock_mito_function.assert_called_once()
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
# Streaming counterpart of the fallback test: with no user keys configured,
# stream_completions must call the per-provider Mito-server streaming function.
@pytest.mark.parametrize("mito_server_config", [
    {
        "name": "openai_fallback",
        "model": "gpt-4o-mini",
        "mock_function": "mito_ai.openai_client.stream_ai_completion_from_mito_server",
        "provider_name": "Mito server",
        "key_type": "mito_server"
    },
    {
        "name": "claude_fallback",
        "model": "claude-3-opus-20240229",
        "mock_function": "mito_ai.anthropic_client.stream_anthropic_completion_from_mito_server",
        "provider_name": "Claude",
        "key_type": "claude"
    },
    {
        "name": "gemini_fallback",
        "model": "gemini-2.0-flash",
        "mock_function": "mito_ai.gemini_client.stream_gemini_completion_from_mito_server",
        "provider_name": "Gemini",
        "key_type": "gemini"
    },
])
@pytest.mark.asyncio
async def test_mito_server_fallback_stream_completion(
    mito_server_config: dict,
    monkeypatch: pytest.MonkeyPatch,
    provider_config: Config
) -> None:
    """Test that stream completions fallback to Mito server when no API keys are set."""
    # Clear all API keys to force Mito server fallback
    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
    monkeypatch.setattr("mito_ai.constants.CLAUDE_API_KEY", None)
    monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
    monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
    provider_config.OpenAIProvider.api_key = None

    # Create an async generator that yields chunks for streaming
    async def mock_stream_generator():
        yield "Chunk 1"
        yield "Chunk 2"
        yield "Chunk 3"

    # Mock the appropriate Mito server streaming function
    with patch(mito_server_config["mock_function"]) as mock_mito_stream:
        mock_mito_stream.return_value = mock_stream_generator()

        messages: List[ChatCompletionMessageParam] = [
            {"role": "user", "content": "Test message"}
        ]

        # Collect everything the provider pushes through the reply callback.
        reply_chunks = []
        def mock_reply(chunk):
            reply_chunks.append(chunk)

        # Apply patch_server_limits for all cases, not just openai_fallback
        # Also patch update_mito_server_quota where it's actually used in openai_client
        with patch_server_limits(), patch("mito_ai.openai_client.update_mito_server_quota", MagicMock(return_value=None)):
            llm = OpenAIProvider(config=provider_config)

            completion = await llm.stream_completions(
                message_type=MessageType.CHAT,
                messages=messages,
                model=mito_server_config["model"],
                message_id="test-id",
                thread_id="test-thread",
                reply_fn=mock_reply
            )

            # Verify that the Mito server function was called
            mock_mito_stream.assert_called_once()
            # Verify that reply chunks were generated
            assert len(reply_chunks) > 0
            assert isinstance(reply_chunks[0], CompletionReply)
|