mito-ai 0.1.56__py3-none-any.whl → 0.1.58__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +17 -21
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +24 -14
- mito_ai/chart_wizard/__init__.py +3 -0
- mito_ai/chart_wizard/handlers.py +113 -0
- mito_ai/chart_wizard/urls.py +26 -0
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
- mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
- mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
- mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
- mito_ai/completions/completion_handlers/completion_handler.py +14 -7
- mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
- mito_ai/completions/completion_handlers/scratchpad_result_handler.py +64 -0
- mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
- mito_ai/completions/completion_handlers/utils.py +3 -7
- mito_ai/completions/handlers.py +36 -21
- mito_ai/completions/message_history.py +8 -10
- mito_ai/completions/models.py +23 -2
- mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +5 -3
- mito_ai/completions/prompt_builders/agent_system_message.py +97 -5
- mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
- mito_ai/completions/prompt_builders/chart_conversion_prompt.py +27 -0
- mito_ai/completions/prompt_builders/chat_system_message.py +2 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +28 -0
- mito_ai/completions/prompt_builders/scratchpad_result_prompt.py +17 -0
- mito_ai/constants.py +8 -1
- mito_ai/enterprise/__init__.py +1 -1
- mito_ai/enterprise/litellm_client.py +137 -0
- mito_ai/log/handlers.py +1 -1
- mito_ai/openai_client.py +10 -90
- mito_ai/{completions/providers.py → provider_manager.py} +157 -53
- mito_ai/settings/enterprise_handler.py +26 -0
- mito_ai/settings/urls.py +2 -0
- mito_ai/streamlit_conversion/agent_utils.py +2 -30
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
- mito_ai/streamlit_preview/handlers.py +6 -3
- mito_ai/streamlit_preview/urls.py +5 -3
- mito_ai/tests/message_history/test_generate_short_chat_name.py +72 -28
- mito_ai/tests/providers/test_anthropic_client.py +174 -16
- mito_ai/tests/providers/test_azure.py +13 -13
- mito_ai/tests/providers/test_capabilities.py +14 -17
- mito_ai/tests/providers/test_gemini_client.py +14 -13
- mito_ai/tests/providers/test_model_resolution.py +145 -89
- mito_ai/tests/providers/test_openai_client.py +209 -13
- mito_ai/tests/providers/test_provider_limits.py +5 -5
- mito_ai/tests/providers/test_providers.py +229 -51
- mito_ai/tests/providers/test_retry_logic.py +13 -22
- mito_ai/tests/providers/utils.py +4 -4
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
- mito_ai/tests/test_enterprise_mode.py +162 -0
- mito_ai/tests/test_model_utils.py +271 -0
- mito_ai/utils/anthropic_utils.py +8 -6
- mito_ai/utils/gemini_utils.py +0 -3
- mito_ai/utils/litellm_utils.py +84 -0
- mito_ai/utils/model_utils.py +178 -0
- mito_ai/utils/open_ai_utils.py +0 -8
- mito_ai/utils/provider_utils.py +6 -21
- mito_ai/utils/telemetry_utils.py +14 -2
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js +2992 -282
- mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +1 -0
- mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js +17 -17
- mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js.map → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +1 -1
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +7 -2
- {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/METADATA +2 -1
- {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/RECORD +94 -81
- mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js.map +0 -1
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
- {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/licenses/LICENSE +0 -0
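The headline change in this release is the rename of mito_ai/completions/providers.py to mito_ai/provider_manager.py, which every hunk below chases through imports, fixtures, and docstrings. A sketch of the import migration (the new line is taken verbatim from the hunks below; the old line is truncated in this rendering, so its exported name is inferred from the removed OpenAIProvider references in the test_capabilities.py hunks):

    # Before (0.1.56) - path from the rename entry above; exported name inferred:
    # from mito_ai.completions.providers import OpenAIProvider

    # After (0.1.58) - verbatim from the added lines below:
    from mito_ai.provider_manager import ProviderManager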
mito_ai/streamlit_preview/handlers.py
@@ -11,15 +11,18 @@ from mito_ai.utils.telemetry_utils import log_streamlit_app_conversion_error, lo…
 from mito_ai.completions.models import MessageType
 from mito_ai.utils.error_classes import StreamlitConversionError, StreamlitPreviewError
 from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
+from mito_ai.provider_manager import ProviderManager
 import traceback


 class StreamlitPreviewHandler(APIHandler):
     """REST handler for streamlit preview operations."""

-    def initialize(self) -> None:
+    def initialize(self, llm: ProviderManager) -> None:
         """Initialize the handler."""
+        super().initialize()
         self.preview_manager = StreamlitPreviewManager()
+        self._llm = llm

     @tornado.web.authenticated

@@ -45,11 +48,11 @@ class StreamlitPreviewHandler(APIHandler):
             print("[Mito AI] Force recreating streamlit app")

             # Create a new app
-            await streamlit_handler(True, absolute_notebook_path, app_file_name, streamlit_app_prompt)
+            await streamlit_handler(True, absolute_notebook_path, app_file_name, streamlit_app_prompt, self._llm)
         elif streamlit_app_prompt != '':
             # Update an existing app if there is a prompt provided. Otherwise, the user is just
             # starting an existing app so we can skip the streamlit_handler all together
-            await streamlit_handler(False, absolute_notebook_path, app_file_name, streamlit_app_prompt)
+            await streamlit_handler(False, absolute_notebook_path, app_file_name, streamlit_app_prompt, self._llm)

         # Start preview
         # TODO: There's a bug here where when the user rebuilds and already running app. Instead of…
mito_ai/streamlit_preview/urls.py
@@ -4,12 +4,14 @@
 from typing import Any, List, Tuple
 from jupyter_server.utils import url_path_join
 from mito_ai.streamlit_preview.handlers import StreamlitPreviewHandler
+from mito_ai.provider_manager import ProviderManager

-def get_streamlit_preview_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
+def get_streamlit_preview_urls(base_url: str, provider_manager: ProviderManager) -> List[Tuple[str, Any, dict]]:
     """Get all streamlit preview related URL patterns.

     Args:
         base_url: The base URL for the Jupyter server
+        provider_manager: The ProviderManager instance

     Returns:
         List of (url_pattern, handler_class, handler_kwargs) tuples
@@ -17,6 +19,6 @@ def get_streamlit_preview_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
     BASE_URL = base_url + "/mito-ai"

     return [
-        (url_path_join(BASE_URL, "streamlit-preview"), StreamlitPreviewHandler, {}),
-        (url_path_join(BASE_URL, "streamlit-preview/(.+)"), StreamlitPreviewHandler, {}),
+        (url_path_join(BASE_URL, "streamlit-preview"), StreamlitPreviewHandler, {"llm": provider_manager}),
+        (url_path_join(BASE_URL, "streamlit-preview/(.+)"), StreamlitPreviewHandler, {"llm": provider_manager}),
     ]
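Together these two hunks thread one ProviderManager instance from URL registration into the handler: the third element of each URL tuple is a Tornado handler-kwargs dict, and Tornado forwards it to RequestHandler.initialize. A minimal wiring sketch, assuming a standard Jupyter server extension entry point (the extension function and the ProviderManager construction are illustrative, not taken from this diff):

    # Hypothetical wiring; only get_streamlit_preview_urls, ProviderManager,
    # and the {"llm": ...} kwarg come from the hunks above.
    from mito_ai.provider_manager import ProviderManager
    from mito_ai.streamlit_preview.urls import get_streamlit_preview_urls

    def _load_jupyter_server_extension(server_app):
        web_app = server_app.web_app
        base_url = web_app.settings["base_url"]
        provider_manager = ProviderManager(config=server_app.config)  # assumed construction
        # Tornado passes {"llm": provider_manager} into
        # StreamlitPreviewHandler.initialize(llm=...).
        web_app.add_handlers(".*$", get_streamlit_preview_urls(base_url, provider_manager))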
mito_ai/tests/message_history/test_generate_short_chat_name.py
@@ -5,24 +5,25 @@ import pytest
 from unittest.mock import AsyncMock, MagicMock, patch
 from traitlets.config import Config
 from mito_ai.completions.message_history import generate_short_chat_name
-from mito_ai.…
+from mito_ai.provider_manager import ProviderManager


 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the…
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.…
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config


 # Test cases for different models and their expected providers/fast models
 PROVIDER_TEST_CASES = [
-    # (model, client_patch_path)
-    ("gpt-4.1", "mito_ai.…
-    ("claude-…
-    ("gemini-…
+    # (model, client_patch_path) - patch where the classes are used (in provider_manager)
+    ("gpt-4.1", "mito_ai.provider_manager.OpenAIClient"),
+    ("claude-sonnet-4-5-20250929", "mito_ai.provider_manager.AnthropicClient"),
+    ("gemini-3-flash-preview", "mito_ai.provider_manager.GeminiClient"),
+    ("openai/gpt-4o", "mito_ai.provider_manager.LiteLLMClient"),  # LiteLLM test case
 ]

 @pytest.mark.parametrize("selected_model,client_patch_path", PROVIDER_TEST_CASES)
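The rewritten comment ("patch where the classes are used") applies the standard unittest.mock rule: patch a name in the namespace that looks it up, not in the module that defines it. A generic illustration with hypothetical module names:

    # clients.py defines OpenAIClient; provider.py does
    # "from clients import OpenAIClient", so provider.py holds its own
    # reference bound at import time. Patching "clients.OpenAIClient"
    # therefore does not affect provider.py; patch the name where it is used:
    from unittest.mock import patch

    with patch("provider.OpenAIClient") as mock_cls:
        ...  # code inside provider.py now resolves the mock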
@@ -37,31 +38,77 @@ async def test_generate_short_chat_name_uses_correct_provider_and_fast_model(

     # Set up environment variables for all providers
     monkeypatch.setenv("OPENAI_API_KEY", "fake-openai-key")
-    monkeypatch.setenv("…
+    monkeypatch.setenv("ANTHROPIC_API_KEY", "fake-claude-key")
     monkeypatch.setenv("GEMINI_API_KEY", "fake-gemini-key")
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "fake-openai-key")
-    monkeypatch.setattr("mito_ai.constants.…
+    monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", "fake-claude-key")
     monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", "fake-gemini-key")

+    # Set up LiteLLM constants if testing LiteLLM
+    if "LiteLLMClient" in client_patch_path:
+        # Patch constants both at the source and where they're imported in model_utils
+        monkeypatch.setattr("mito_ai.constants.LITELLM_BASE_URL", "https://litellm-server.com")
+        monkeypatch.setattr("mito_ai.constants.LITELLM_API_KEY", "fake-litellm-key")
+        monkeypatch.setattr("mito_ai.constants.LITELLM_MODELS", ["openai/gpt-4o", "anthropic/claude-3-5-sonnet"])
+        # Also patch where constants is imported in model_utils (where get_available_models uses it)
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_BASE_URL", "https://litellm-server.com")
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_MODELS", ["openai/gpt-4o", "anthropic/claude-3-5-sonnet"])
+        # Mock is_enterprise to return True so LiteLLM models are available
+        monkeypatch.setattr("mito_ai.utils.version_utils.is_enterprise", lambda: True)
+
     # Create mock client for the specific provider being tested
     mock_client = MagicMock()
     mock_client.request_completions = AsyncMock(return_value="Test Chat Name")

+    # Create the ProviderManager first
+    llm_provider = ProviderManager(config=provider_config)
+
+    # Set the selected model (this is required for the ProviderManager to use the correct model)
+    llm_provider.set_selected_model(selected_model)
+
     # Patch the specific client class that should be used based on the model
-    # …
-… [old lines 52-64 truncated in this rendering]
+    # For Anthropic, Gemini, and LiteLLM, new instances are created in request_completions, so we patch the class
+    # For OpenAI, the instance is created in __init__, so we patch the instance's method
+    if "AnthropicClient" in client_patch_path:
+        with patch(client_patch_path, return_value=mock_client):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif "GeminiClient" in client_patch_path:
+        with patch(client_patch_path, return_value=mock_client):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif "LiteLLMClient" in client_patch_path:
+        # Patch LiteLLMClient where it's defined (it's imported inside request_completions)
+        # Also patch get_available_models to return LiteLLM models
+        with patch("mito_ai.enterprise.litellm_client.LiteLLMClient", return_value=mock_client), \
+             patch("mito_ai.provider_manager.get_available_models", return_value=["openai/gpt-4o", "anthropic/claude-3-5-sonnet"]):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    else:  # OpenAI
+        # For OpenAI, patch the instance's method since the client is created in __init__
+        assert llm_provider._openai_client is not None, "OpenAI client should be initialized"
+        with patch.object(llm_provider._openai_client, 'request_completions', new_callable=AsyncMock, return_value="Test Chat Name") as mock_openai_request:
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+        # Verify that the OpenAI client's request_completions was called
+        mock_openai_request.assert_called_once()  # type: ignore
+        # As a double check, if we have used the correct client, then we must get the correct result
+        assert result == "Test Chat Name"
+        return
+
+    # Verify that the correct client's request_completions was called (for Anthropic, Gemini, and LiteLLM)
     mock_client.request_completions.assert_called_once()

     # As a double check, if we have used the correct client, then we must get the correct result
@@ -74,13 +121,12 @@ async def test_generate_short_chat_name_cleans_gemini_response() -> None:
     """Test that generate_short_chat_name properly cleans Gemini-style responses with quotes and newlines."""

     # Create mock llm_provider that returns a response with quotes and newlines
-    mock_llm_provider = MagicMock(spec=…
+    mock_llm_provider = MagicMock(spec=ProviderManager)
     mock_llm_provider.request_completions = AsyncMock(return_value='"France Geography Discussion\n"')

     result = await generate_short_chat_name(
         user_message="What is the capital of France?",
         assistant_message="The capital of France is Paris.",
-        model="gemini-2.0-flash-exp",
         llm_provider=mock_llm_provider
     )

@@ -95,13 +141,12 @@ async def test_generate_short_chat_name_handles_empty_response() -> None:
     """Test that generate_short_chat_name handles empty or None responses gracefully."""

     # Test with empty string response
-    mock_llm_provider = MagicMock(spec=…
+    mock_llm_provider = MagicMock(spec=ProviderManager)
     mock_llm_provider.request_completions = AsyncMock(return_value="")

     result = await generate_short_chat_name(
         user_message="Test message",
         assistant_message="Test response",
-        model="gpt-4.1",
         llm_provider=mock_llm_provider
     )

@@ -113,7 +158,6 @@ async def test_generate_short_chat_name_handles_empty_response() -> None:
     result = await generate_short_chat_name(
         user_message="Test message",
         assistant_message="Test response",
-        model="gpt-4.1",
         llm_provider=mock_llm_provider
     )

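The net effect of these hunks is an API change: callers no longer pass a model into generate_short_chat_name; the ProviderManager resolves the fast model from its own selected model. A minimal usage sketch of the new signature (the messages and selected model are illustrative):

    # Hypothetical call under the 0.1.58 signature exercised above.
    provider = ProviderManager(config=config)
    provider.set_selected_model("claude-sonnet-4-5-20250929")
    name = await generate_short_chat_name(
        user_message="What is the capital of France?",
        assistant_message="The capital of France is Paris.",
        llm_provider=provider,  # no model= argument anymore
    )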
mito_ai/tests/providers/test_anthropic_client.py
@@ -3,11 +3,10 @@

 import pytest
 from mito_ai.anthropic_client import get_anthropic_system_prompt_and_messages, get_anthropic_system_prompt_and_messages_with_caching, add_cache_control_to_message, extract_and_parse_anthropic_json_response, AnthropicClient
-from mito_ai.utils.anthropic_utils import FAST_ANTHROPIC_MODEL
 from anthropic.types import Message, TextBlock, ToolUseBlock, Usage, ToolUseBlock, Message, Usage, TextBlock
 from openai.types.chat import ChatCompletionMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionSystemMessageParam
 from mito_ai.completions.models import MessageType
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch
 import anthropic
 from typing import List, Dict, cast

@@ -233,24 +232,25 @@ def test_tool_use_without_agent_response():
     assert "No valid AgentResponse format found" in str(exc_info.value)

 CUSTOM_MODEL = "smart-anthropic-model"
-@pytest.mark.parametrize("message_type…
-… [old lines 237-243 truncated in this rendering]
+@pytest.mark.parametrize("message_type", [
+    MessageType.CHAT,
+    MessageType.SMART_DEBUG,
+    MessageType.CODE_EXPLAIN,
+    MessageType.AGENT_EXECUTION,
+    MessageType.AGENT_AUTO_ERROR_FIXUP,
+    MessageType.INLINE_COMPLETION,
+    MessageType.CHAT_NAME_GENERATION,
 ])
 @pytest.mark.asyncio
-async def test_model_selection_based_on_message_type(message_type, expected_mode…
+async def test_model_selection_uses_passed_model(message_type):
     """
-    Tests that the…
+    Tests that the model passed to the client is used as-is.
+    Model selection based on message type is now handled by ProviderManager.
     """
     client = AnthropicClient(api_key="test_key")

-    # Mock the messages.create method directly
-    with patch.object(client.client.messages, 'create') as mock_create:  # type: ignore
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
         # Create a mock response
         mock_response = Message(
             id="test_id",
@@ -269,10 +269,168 @@ async def test_model_selection_based_on_message_type(message_type, expected_mode…
             response_format_info=None
         )

-        # Verify that create was called with the…
+        # Verify that create was called with the model that was passed (not overridden)
         mock_create.assert_called_once()
         call_args = mock_create.call_args
-        assert call_args[1]['model'] ==…
+        assert call_args[1]['model'] == CUSTOM_MODEL
+
+@pytest.mark.asyncio
+async def test_anthropic_client_uses_fast_model_from_provider_manager_without_override():
+    """Test that Anthropic client uses the fast model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock response
+        mock_response = Message(
+            id="test_id",
+            role="assistant",
+            content=[TextBlock(type="text", text="test")],
+            model='anthropic-model-we-do-not-check',
+            type="message",
+            usage=Usage(input_tokens=0, output_tokens=0)
+        )
+        mock_create.return_value = mock_response
+
+        # Use a fast model that would be selected by ProviderManager
+        fast_model = get_fast_model_for_selected_model("claude-sonnet-4-5-20250929")
+
+        await client.request_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=fast_model,
+            message_type=MessageType.CHAT,
+            response_format_info=None
+        )
+
+        # Verify that create was called with the fast model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == fast_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_uses_smartest_model_from_provider_manager_without_override():
+    """Test that Anthropic client uses the smartest model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock response
+        mock_response = Message(
+            id="test_id",
+            role="assistant",
+            content=[TextBlock(type="text", text="test")],
+            model='anthropic-model-we-do-not-check',
+            type="message",
+            usage=Usage(input_tokens=0, output_tokens=0)
+        )
+        mock_create.return_value = mock_response
+
+        # Use a smartest model that would be selected by ProviderManager
+        smartest_model = get_smartest_model_for_selected_model("claude-haiku-4-5-20251001")
+
+        await client.request_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=smartest_model,
+            message_type=MessageType.CHAT,
+            response_format_info=None
+        )
+
+        # Verify that create was called with the smartest model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == smartest_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_stream_uses_fast_model_from_provider_manager_without_override():
+    """Test that Anthropic client stream_completions uses the fast model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method for streaming
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock stream response
+        class MockStreamChunk:
+            def __init__(self, chunk_type, text=""):
+                self.type = chunk_type
+                if chunk_type == "content_block_delta":
+                    self.delta = MagicMock()
+                    self.delta.type = "text_delta"
+                    self.delta.text = text
+
+        mock_stream = [
+            MockStreamChunk("content_block_delta", "test"),
+            MockStreamChunk("message_stop")
+        ]
+        mock_create.return_value = iter(mock_stream)
+
+        # Use a fast model that would be selected by ProviderManager
+        fast_model = get_fast_model_for_selected_model("claude-sonnet-4-5-20250929")
+
+        reply_chunks = []
+        def mock_reply(chunk):
+            reply_chunks.append(chunk)
+
+        await client.stream_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=fast_model,
+            message_id="test-id",
+            message_type=MessageType.CHAT,
+            reply_fn=mock_reply
+        )
+
+        # Verify that create was called with the fast model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == fast_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_stream_uses_smartest_model_from_provider_manager_without_override():
+    """Test that Anthropic client stream_completions uses the smartest model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method for streaming
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock stream response
+        class MockStreamChunk:
+            def __init__(self, chunk_type, text=""):
+                self.type = chunk_type
+                if chunk_type == "content_block_delta":
+                    self.delta = MagicMock()
+                    self.delta.type = "text_delta"
+                    self.delta.text = text
+
+        mock_stream = [
+            MockStreamChunk("content_block_delta", "test"),
+            MockStreamChunk("message_stop")
+        ]
+        mock_create.return_value = iter(mock_stream)
+
+        # Use a smartest model that would be selected by ProviderManager
+        smartest_model = get_smartest_model_for_selected_model("claude-haiku-4-5-20251001")
+
+        reply_chunks = []
+        def mock_reply(chunk):
+            reply_chunks.append(chunk)
+
+        await client.stream_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=smartest_model,
+            message_id="test-id",
+            message_type=MessageType.CHAT,
+            reply_fn=mock_reply
+        )
+
+        # Verify that create was called with the smartest model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == smartest_model


 # Caching Tests
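All four new tests lean on the helpers in the new mito_ai/utils/model_utils.py (+178 lines in the file list above). Their contract, as far as these tests reveal it, is a pure model-to-model mapping; a hedged sketch of that assumption (the actual mapping is internal to model_utils and not shown in this diff):

    # Behavior inferred only from how the tests above use the helpers.
    from mito_ai.utils.model_utils import (
        get_fast_model_for_selected_model,
        get_smartest_model_for_selected_model,
    )

    # Given the user's selected model, pick a cheap sibling for low-stakes
    # calls such as chat-name generation...
    fast_model = get_fast_model_for_selected_model("claude-sonnet-4-5-20250929")
    # ...or the strongest sibling for high-stakes calls; the client then
    # sends whatever model it is handed, without overriding it.
    smartest_model = get_smartest_model_for_selected_model("claude-haiku-4-5-20251001")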
mito_ai/tests/providers/test_azure.py
@@ -9,7 +9,7 @@ import pytest
 from traitlets.config import Config
 from openai.types.chat import ChatCompletionMessageParam

-from mito_ai.…
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.models import (
     MessageType,
     AICapabilities,
@@ -29,9 +29,9 @@ FAKE_AZURE_API_VERSION = "2024-12-01-preview"

 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the…
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.…
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config

@@ -40,7 +40,7 @@ def provider_config() -> Config:
 def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
     """Reset all environment variables before each test."""
     for var in [
-        "OPENAI_API_KEY", "…
+        "OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GEMINI_API_KEY", "OLLAMA_MODEL",
         "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_MODEL",
         "AZURE_OPENAI_API_VERSION"
     ]:
@@ -405,7 +405,7 @@ class TestAzureOpenAIStreamCompletions:


 class TestAzureOpenAIProviderIntegration:
-    """Test Azure OpenAI integration through the…
+    """Test Azure OpenAI integration through the ProviderManager."""

     @pytest.mark.asyncio
     @pytest.mark.parametrize("message_type", COMPLETION_MESSAGE_TYPES)
@@ -415,7 +415,7 @@ class TestAzureOpenAIProviderIntegration:
         provider_config: Config,
         message_type: MessageType
     ) -> None:
-        """Test that…
+        """Test that ProviderManager uses Azure OpenAI when gpt-4.1 is requested and Azure is configured."""

         # Mock the response
         mock_response = MagicMock()
@@ -428,7 +428,8 @@ class TestAzureOpenAIProviderIntegration:
             mock_azure_client.is_closed.return_value = False
             mock_azure_client_class.return_value = mock_azure_client

-            provider =…
+            provider = ProviderManager(config=provider_config)
+            provider.set_selected_model("gpt-4.1")

             messages: List[ChatCompletionMessageParam] = [
                 {"role": "user", "content": "Test message"}
@@ -437,7 +438,6 @@ class TestAzureOpenAIProviderIntegration:
             completion = await provider.request_completions(
                 message_type=message_type,
                 messages=messages,
-                model="gpt-4.1"
             )

             # Verify the completion was returned
@@ -461,7 +461,7 @@ class TestAzureOpenAIProviderIntegration:
         provider_config: Config,
         message_type: MessageType
     ) -> None:
-        """Test that…
+        """Test that ProviderManager stream_completions uses Azure OpenAI when gpt-4.1 is requested and Azure is configured."""

         # Mock the streaming response
         mock_chunk1 = MagicMock()
@@ -484,7 +484,8 @@ class TestAzureOpenAIProviderIntegration:
             mock_azure_client.is_closed.return_value = False
             mock_azure_client_class.return_value = mock_azure_client

-            provider =…
+            provider = ProviderManager(config=provider_config)
+            provider.set_selected_model("gpt-4.1")

             messages: List[ChatCompletionMessageParam] = [
                 {"role": "user", "content": "Test message"}
@@ -497,7 +498,6 @@ class TestAzureOpenAIProviderIntegration:
             completion = await provider.stream_completions(
                 message_type=message_type,
                 messages=messages,
-                model="gpt-4.1",
                 message_id="test-id",
                 thread_id="test-thread",
                 reply_fn=mock_reply
@@ -554,8 +554,8 @@ class TestAzureOpenAIConfigurationPriority:
         """Test that Azure OpenAI is used even when Claude key is available."""

         # Set Claude key (this should be overridden by Azure OpenAI)
-        monkeypatch.setenv("…
-        monkeypatch.setattr("mito_ai.constants.…
+        monkeypatch.setenv("ANTHROPIC_API_KEY", "claude-key")
+        monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", "claude-key")

         with patch("openai.AsyncAzureOpenAI") as mock_azure_client:
             openai_client = OpenAIClient(config=provider_config)
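The recurring migration in this file is that the per-call model= argument is gone: the model is pinned once on the manager instead of travelling with every request. A before/after sketch (messages illustrative):

    # Before (0.1.56): the model accompanied each call.
    # completion = await provider.request_completions(
    #     message_type=message_type, messages=messages, model="gpt-4.1")

    # After (0.1.58): pin the model once, then call without it.
    provider = ProviderManager(config=provider_config)
    provider.set_selected_model("gpt-4.1")
    completion = await provider.request_completions(
        message_type=message_type,
        messages=messages,
    )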
mito_ai/tests/providers/test_capabilities.py
@@ -3,7 +3,7 @@

 import pytest
 from unittest.mock import MagicMock, patch
-from mito_ai.…
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.tests.providers.utils import mock_azure_openai_client, mock_openai_client, patch_server_limits
 from traitlets.config import Config

@@ -11,9 +11,9 @@ FAKE_API_KEY = "sk-1234567890"

 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the…
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.…
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config

@@ -22,7 +22,7 @@ def provider_config() -> Config:
         "name": "mito_server_fallback_no_keys",
         "setup": {
             "OPENAI_API_KEY": None,
-            "…
+            "ANTHROPIC_API_KEY": None,
             "GEMINI_API_KEY": None,
             "is_azure_configured": False,
         },
@@ -33,45 +33,45 @@ def provider_config() -> Config:
         "name": "claude_when_only_claude_key",
         "setup": {
             "OPENAI_API_KEY": None,
-            "…
+            "ANTHROPIC_API_KEY": "claude-test-key",
             "GEMINI_API_KEY": None,
             "is_azure_configured": False,
         },
         "expected_provider": "Claude",
-        "expected_key_type": "…
+        "expected_key_type": "user_key"
     },
     {
         "name": "gemini_when_only_gemini_key",
         "setup": {
             "OPENAI_API_KEY": None,
-            "…
+            "ANTHROPIC_API_KEY": None,
             "GEMINI_API_KEY": "gemini-test-key",
             "is_azure_configured": False,
         },
         "expected_provider": "Gemini",
-        "expected_key_type": "…
+        "expected_key_type": "user_key"
     },
     {
         "name": "openai_when_openai_key",
         "setup": {
             "OPENAI_API_KEY": 'openai-test-key',
-            "…
+            "ANTHROPIC_API_KEY": None,
             "GEMINI_API_KEY": None,
             "is_azure_configured": False,
         },
-        "expected_provider": "OpenAI…
+        "expected_provider": "OpenAI",
         "expected_key_type": "user_key"
     },
     {
         "name": "claude_priority_over_gemini",
         "setup": {
             "OPENAI_API_KEY": None,
-            "…
+            "ANTHROPIC_API_KEY": "claude-test-key",
             "GEMINI_API_KEY": "gemini-test-key",
             "is_azure_configured": False,
         },
         "expected_provider": "Claude",
-        "expected_key_type": "…
+        "expected_key_type": "user_key"
     },
 ])
 def test_provider_capabilities_real_logic(
@@ -79,7 +79,7 @@ def test_provider_capabilities_real_logic(
     monkeypatch: pytest.MonkeyPatch,
     provider_config: Config
 ) -> None:
-    """Test the actual provider selection logic in…
+    """Test the actual provider selection logic in ProviderManager.capabilities"""

     # Set up the environment based on test case
     setup = test_case["setup"]
@@ -97,9 +97,6 @@ def test_provider_capabilities_real_logic(
     else:
         monkeypatch.setattr(f"mito_ai.constants.{key}", value)

-    # Clear the provider config API key to ensure it uses constants
-    provider_config.OpenAIProvider.api_key = None
-
     # Mock HTTP calls but let the real logic run
     with patch("openai.OpenAI") as mock_openai_constructor:
         with patch("openai.AsyncOpenAI") as mock_async_openai:
@@ -112,7 +109,7 @@ def test_provider_capabilities_real_logic(
         # Mock server limits for Mito server fallback
         with patch_server_limits():
             # NOW create the provider after ALL mocks are set up
-            llm =…
+            llm = ProviderManager(config=provider_config)

             # Test capabilities
             capabilities = llm.capabilities