mito-ai 0.1.57__py3-none-any.whl → 0.1.59__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +19 -22
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +24 -14
- mito_ai/chart_wizard/handlers.py +78 -17
- mito_ai/chart_wizard/urls.py +8 -5
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
- mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
- mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
- mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
- mito_ai/completions/completion_handlers/completion_handler.py +3 -5
- mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
- mito_ai/completions/completion_handlers/scratchpad_result_handler.py +6 -8
- mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
- mito_ai/completions/completion_handlers/utils.py +3 -7
- mito_ai/completions/handlers.py +32 -22
- mito_ai/completions/message_history.py +8 -10
- mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +2 -0
- mito_ai/constants.py +31 -2
- mito_ai/enterprise/__init__.py +1 -1
- mito_ai/enterprise/litellm_client.py +144 -0
- mito_ai/enterprise/utils.py +16 -2
- mito_ai/log/handlers.py +1 -1
- mito_ai/openai_client.py +36 -96
- mito_ai/provider_manager.py +420 -0
- mito_ai/settings/enterprise_handler.py +26 -0
- mito_ai/settings/urls.py +2 -0
- mito_ai/streamlit_conversion/agent_utils.py +2 -30
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
- mito_ai/streamlit_preview/handlers.py +6 -3
- mito_ai/streamlit_preview/urls.py +5 -3
- mito_ai/tests/message_history/test_generate_short_chat_name.py +103 -28
- mito_ai/tests/open_ai_utils_test.py +34 -36
- mito_ai/tests/providers/test_anthropic_client.py +174 -16
- mito_ai/tests/providers/test_azure.py +15 -15
- mito_ai/tests/providers/test_capabilities.py +14 -17
- mito_ai/tests/providers/test_gemini_client.py +14 -13
- mito_ai/tests/providers/test_model_resolution.py +145 -89
- mito_ai/tests/providers/test_openai_client.py +209 -13
- mito_ai/tests/providers/test_provider_limits.py +5 -5
- mito_ai/tests/providers/test_providers.py +229 -51
- mito_ai/tests/providers/test_retry_logic.py +13 -22
- mito_ai/tests/providers/utils.py +4 -4
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
- mito_ai/tests/test_constants.py +90 -0
- mito_ai/tests/test_enterprise_mode.py +217 -0
- mito_ai/tests/test_model_utils.py +362 -0
- mito_ai/utils/anthropic_utils.py +8 -6
- mito_ai/utils/gemini_utils.py +0 -3
- mito_ai/utils/litellm_utils.py +84 -0
- mito_ai/utils/model_utils.py +257 -0
- mito_ai/utils/open_ai_utils.py +29 -41
- mito_ai/utils/provider_utils.py +13 -29
- mito_ai/utils/telemetry_utils.py +14 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js +1059 -144
- mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js.map +1 -0
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js +17 -17
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js.map +1 -1
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +78 -78
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/METADATA +2 -1
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/RECORD +90 -83
- mito_ai/completions/providers.py +0 -284
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +0 -1
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/licenses/LICENSE +0 -0
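
The headline change between these versions is the new mito_ai/provider_manager.py (+420 lines), which replaces the deleted mito_ai/completions/providers.py: handlers and helpers now receive a single ProviderManager instead of choosing models themselves. The hunks below show how it is threaded through the streamlit preview endpoints and the test suite. As orientation, the call pattern those tests exercise looks roughly like the sketch below; the ProviderManager constructor, set_selected_model, and the generate_short_chat_name signature are taken from the diffs, while the surrounding function structure is illustrative only.

# Sketch of the 0.1.59 call pattern implied by the test diffs; not the actual mito_ai source.
from traitlets.config import Config
from mito_ai.provider_manager import ProviderManager
from mito_ai.completions.message_history import generate_short_chat_name

def build_provider_manager() -> ProviderManager:
    # Per-class config sections, mirroring the provider_config test fixture below.
    config = Config()
    config.ProviderManager = Config()
    config.OpenAIClient = Config()
    llm = ProviderManager(config=config)
    # The selected model determines which underlying client (OpenAI, Anthropic, Gemini, LiteLLM) is used.
    llm.set_selected_model("gpt-4.1")
    return llm

async def name_chat(llm: ProviderManager) -> str:
    # Callers no longer pass a model; the manager resolves the fast/smart variant itself.
    return await generate_short_chat_name(
        user_message="What is the capital of France?",
        assistant_message="The capital of France is Paris.",
        llm_provider=llm,
    )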
mito_ai/streamlit_preview/handlers.py

@@ -11,15 +11,18 @@ from mito_ai.utils.telemetry_utils import log_streamlit_app_conversion_error, lo
 from mito_ai.completions.models import MessageType
 from mito_ai.utils.error_classes import StreamlitConversionError, StreamlitPreviewError
 from mito_ai.streamlit_conversion.streamlit_agent_handler import streamlit_handler
+from mito_ai.provider_manager import ProviderManager
 import traceback


 class StreamlitPreviewHandler(APIHandler):
     """REST handler for streamlit preview operations."""

-    def initialize(self) -> None:
+    def initialize(self, llm: ProviderManager) -> None:
         """Initialize the handler."""
+        super().initialize()
         self.preview_manager = StreamlitPreviewManager()
+        self._llm = llm

     @tornado.web.authenticated

@@ -45,11 +48,11 @@ class StreamlitPreviewHandler(APIHandler):
             print("[Mito AI] Force recreating streamlit app")

             # Create a new app
-            await streamlit_handler(True, absolute_notebook_path, app_file_name, streamlit_app_prompt)
+            await streamlit_handler(True, absolute_notebook_path, app_file_name, streamlit_app_prompt, self._llm)
         elif streamlit_app_prompt != '':
             # Update an existing app if there is a prompt provided. Otherwise, the user is just
             # starting an existing app so we can skip the streamlit_handler all together
-            await streamlit_handler(False, absolute_notebook_path, app_file_name, streamlit_app_prompt)
+            await streamlit_handler(False, absolute_notebook_path, app_file_name, streamlit_app_prompt, self._llm)

         # Start preview
         # TODO: There's a bug here where when the user rebuilds and already running app. Instead of
mito_ai/streamlit_preview/urls.py

@@ -4,12 +4,14 @@
 from typing import Any, List, Tuple
 from jupyter_server.utils import url_path_join
 from mito_ai.streamlit_preview.handlers import StreamlitPreviewHandler
+from mito_ai.provider_manager import ProviderManager

-def get_streamlit_preview_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
+def get_streamlit_preview_urls(base_url: str, provider_manager: ProviderManager) -> List[Tuple[str, Any, dict]]:
     """Get all streamlit preview related URL patterns.

     Args:
         base_url: The base URL for the Jupyter server
+        provider_manager: The ProviderManager instance

     Returns:
         List of (url_pattern, handler_class, handler_kwargs) tuples

@@ -17,6 +19,6 @@ def get_streamlit_preview_urls(base_url: str) -> List[Tuple[str, Any, dict]]:
     BASE_URL = base_url + "/mito-ai"

     return [
-        (url_path_join(BASE_URL, "streamlit-preview"), StreamlitPreviewHandler, {}),
-        (url_path_join(BASE_URL, "streamlit-preview/(.+)"), StreamlitPreviewHandler, {}),
+        (url_path_join(BASE_URL, "streamlit-preview"), StreamlitPreviewHandler, {"llm": provider_manager}),
+        (url_path_join(BASE_URL, "streamlit-preview/(.+)"), StreamlitPreviewHandler, {"llm": provider_manager}),
     ]
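
The mito_ai/__init__.py changes (+19 -22) are not reproduced in this section, but given the new get_streamlit_preview_urls signature above, the extension wiring presumably looks something like the following hypothetical sketch. Only get_streamlit_preview_urls and the {"llm": provider_manager} handler kwargs are confirmed by the hunks; the _load_jupyter_server_extension body, the shared provider_manager variable, and the add_handlers call are standard jupyter_server/Tornado boilerplate assumed for illustration. Tornado forwards the kwargs dict into StreamlitPreviewHandler.initialize(llm=...).

# Hypothetical wiring sketch; the real mito_ai/__init__.py is not shown in this section.
from jupyter_server.serverapp import ServerApp
from mito_ai.provider_manager import ProviderManager
from mito_ai.streamlit_preview.urls import get_streamlit_preview_urls

def _load_jupyter_server_extension(server_app: ServerApp) -> None:
    web_app = server_app.web_app
    base_url = web_app.settings["base_url"]

    # One ProviderManager shared by every handler that needs an LLM.
    provider_manager = ProviderManager(config=server_app.config)

    # Each tuple is (url_pattern, handler_class, {"llm": provider_manager});
    # Tornado passes the kwargs dict into StreamlitPreviewHandler.initialize().
    web_app.add_handlers(".*$", get_streamlit_preview_urls(base_url, provider_manager))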
mito_ai/tests/message_history/test_generate_short_chat_name.py

@@ -5,24 +5,26 @@ import pytest
 from unittest.mock import AsyncMock, MagicMock, patch
 from traitlets.config import Config
 from mito_ai.completions.message_history import generate_short_chat_name
-from mito_ai.
+from mito_ai.provider_manager import ProviderManager


 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config


 # Test cases for different models and their expected providers/fast models
 PROVIDER_TEST_CASES = [
-    # (model, client_patch_path)
-    ("gpt-4.1", "mito_ai.
-    ("claude-
-    ("gemini-
+    # (model, client_patch_path) - patch where the classes are used (in provider_manager)
+    ("gpt-4.1", "mito_ai.provider_manager.OpenAIClient"),
+    ("claude-sonnet-4-5-20250929", "mito_ai.provider_manager.AnthropicClient"),
+    ("gemini-3-flash-preview", "mito_ai.provider_manager.GeminiClient"),
+    ("litellm/openai/gpt-4o", "mito_ai.provider_manager.LiteLLMClient"),  # LiteLLM test case
+    ("Abacus/gpt-4.1", "mito_ai.provider_manager.OpenAIClient"),  # Abacus test case (uses OpenAIClient)
 ]

 @pytest.mark.parametrize("selected_model,client_patch_path", PROVIDER_TEST_CASES)
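
The updated comment in PROVIDER_TEST_CASES ("patch where the classes are used") is the important detail: unittest.mock.patch must target the name in the namespace that looks it up, so the patch paths now point at mito_ai.provider_manager.* rather than at the modules that define the clients. A self-contained illustration of that rule, using throwaway in-memory modules rather than mito_ai code:

# Self-contained demo of "patch where the name is used"; module names here are throwaway, not mito_ai.
import sys
import types
from unittest.mock import MagicMock, patch

clients = types.ModuleType("clients")          # defines the class
class OpenAIClient:
    def ask(self) -> str:
        return "real"
clients.OpenAIClient = OpenAIClient
sys.modules["clients"] = clients

consumer = types.ModuleType("consumer")        # imports the class by name and calls it
exec("from clients import OpenAIClient\n"
     "def ask():\n"
     "    return OpenAIClient().ask()\n", consumer.__dict__)
sys.modules["consumer"] = consumer

# Patching the defining module does NOT affect the copy consumer already imported...
with patch("clients.OpenAIClient", return_value=MagicMock(ask=lambda: "mock")):
    assert sys.modules["consumer"].ask() == "real"

# ...patching the using module does, which is why the test cases target mito_ai.provider_manager.*.
with patch("consumer.OpenAIClient", return_value=MagicMock(ask=lambda: "mock")):
    assert sys.modules["consumer"].ask() == "mock"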
@@ -37,31 +39,107 @@ async def test_generate_short_chat_name_uses_correct_provider_and_fast_model(

     # Set up environment variables for all providers
     monkeypatch.setenv("OPENAI_API_KEY", "fake-openai-key")
-    monkeypatch.setenv("
+    monkeypatch.setenv("ANTHROPIC_API_KEY", "fake-claude-key")
     monkeypatch.setenv("GEMINI_API_KEY", "fake-gemini-key")
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "fake-openai-key")
-    monkeypatch.setattr("mito_ai.constants.
+    monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", "fake-claude-key")
     monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", "fake-gemini-key")

+    # Set up LiteLLM constants if testing LiteLLM
+    if "LiteLLMClient" in client_patch_path:
+        # Patch constants both at the source and where they're imported in model_utils
+        monkeypatch.setattr("mito_ai.constants.LITELLM_BASE_URL", "https://litellm-server.com")
+        monkeypatch.setattr("mito_ai.constants.LITELLM_API_KEY", "fake-litellm-key")
+        monkeypatch.setattr("mito_ai.constants.LITELLM_MODELS", ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"])
+        # Also patch where constants is imported in model_utils (where get_available_models uses it)
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_BASE_URL", "https://litellm-server.com")
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_MODELS", ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"])
+        # Mock is_enterprise to return True so LiteLLM models are available
+        monkeypatch.setattr("mito_ai.utils.version_utils.is_enterprise", lambda: True)
+
+    # Set up Abacus constants if testing Abacus
+    if selected_model.startswith("Abacus/"):
+        # Patch constants both at the source and where they're imported in model_utils
+        monkeypatch.setattr("mito_ai.constants.ABACUS_BASE_URL", "https://routellm.abacus.ai/v1")
+        monkeypatch.setattr("mito_ai.constants.ABACUS_API_KEY", "fake-abacus-key")
+        monkeypatch.setattr("mito_ai.constants.ABACUS_MODELS", ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"])
+        # Also patch where constants is imported in model_utils (where get_available_models uses it)
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.ABACUS_BASE_URL", "https://routellm.abacus.ai/v1")
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.ABACUS_MODELS", ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"])
+        # Mock is_abacus_configured to return True so Abacus models are available
+        monkeypatch.setattr("mito_ai.utils.model_utils.is_abacus_configured", lambda: True)
+        # Mock is_enterprise to return True so enterprise models are available
+        monkeypatch.setattr("mito_ai.utils.version_utils.is_enterprise", lambda: True)
+
     # Create mock client for the specific provider being tested
     mock_client = MagicMock()
     mock_client.request_completions = AsyncMock(return_value="Test Chat Name")

+    # Create the ProviderManager first
+    llm_provider = ProviderManager(config=provider_config)
+
+    # Set the selected model (this is required for the ProviderManager to use the correct model)
+    llm_provider.set_selected_model(selected_model)
+
     # Patch the specific client class that should be used based on the model
-    #
-[13 removed lines truncated in source]
+    # For Anthropic, Gemini, and LiteLLM, new instances are created in request_completions, so we patch the class
+    # For OpenAI, the instance is created in __init__, so we patch the instance's method
+    if "AnthropicClient" in client_patch_path:
+        with patch(client_patch_path, return_value=mock_client):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif "GeminiClient" in client_patch_path:
+        with patch(client_patch_path, return_value=mock_client):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif "LiteLLMClient" in client_patch_path:
+        # Patch LiteLLMClient where it's defined (it's imported inside request_completions)
+        # Also patch get_available_models to return LiteLLM models
+        with patch("mito_ai.enterprise.litellm_client.LiteLLMClient", return_value=mock_client), \
+             patch("mito_ai.provider_manager.get_available_models", return_value=["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif selected_model.startswith("Abacus/"):
+        # For Abacus, it uses OpenAIClient, so patch the instance's method
+        # Also patch get_available_models to return Abacus models
+        assert llm_provider._openai_client is not None, "OpenAI client should be initialized for Abacus"
+        with patch.object(llm_provider._openai_client, 'request_completions', new_callable=AsyncMock, return_value="Test Chat Name") as mock_abacus_request, \
+             patch("mito_ai.provider_manager.get_available_models", return_value=["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"]):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+        # Verify that the OpenAI client's request_completions was called (Abacus uses OpenAIClient)
+        mock_abacus_request.assert_called_once()  # type: ignore
+        # As a double check, if we have used the correct client, then we must get the correct result
+        assert result == "Test Chat Name"
+        return
+    else:  # OpenAI
+        # For OpenAI, patch the instance's method since the client is created in __init__
+        assert llm_provider._openai_client is not None, "OpenAI client should be initialized"
+        with patch.object(llm_provider._openai_client, 'request_completions', new_callable=AsyncMock, return_value="Test Chat Name") as mock_openai_request:
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+        # Verify that the OpenAI client's request_completions was called
+        mock_openai_request.assert_called_once()  # type: ignore
+        # As a double check, if we have used the correct client, then we must get the correct result
+        assert result == "Test Chat Name"
+        return
+
+    # Verify that the correct client's request_completions was called (for Anthropic, Gemini, and LiteLLM)
     mock_client.request_completions.assert_called_once()

     # As a double check, if we have used the correct client, then we must get the correct result
@@ -74,13 +152,12 @@ async def test_generate_short_chat_name_cleans_gemini_response() -> None:
     """Test that generate_short_chat_name properly cleans Gemini-style responses with quotes and newlines."""

     # Create mock llm_provider that returns a response with quotes and newlines
-    mock_llm_provider = MagicMock(spec=
+    mock_llm_provider = MagicMock(spec=ProviderManager)
     mock_llm_provider.request_completions = AsyncMock(return_value='"France Geography Discussion\n"')

     result = await generate_short_chat_name(
         user_message="What is the capital of France?",
         assistant_message="The capital of France is Paris.",
-        model="gemini-2.0-flash-exp",
         llm_provider=mock_llm_provider
     )

@@ -95,13 +172,12 @@ async def test_generate_short_chat_name_handles_empty_response() -> None:
     """Test that generate_short_chat_name handles empty or None responses gracefully."""

     # Test with empty string response
-    mock_llm_provider = MagicMock(spec=
+    mock_llm_provider = MagicMock(spec=ProviderManager)
     mock_llm_provider.request_completions = AsyncMock(return_value="")

     result = await generate_short_chat_name(
         user_message="Test message",
         assistant_message="Test response",
-        model="gpt-4.1",
         llm_provider=mock_llm_provider
     )

@@ -113,7 +189,6 @@ async def test_generate_short_chat_name_handles_empty_response() -> None:
     result = await generate_short_chat_name(
         user_message="Test message",
         assistant_message="Test response",
-        model="gpt-4.1",
         llm_provider=mock_llm_provider
     )

mito_ai/tests/open_ai_utils_test.py

@@ -104,17 +104,16 @@ def test_prepare_request_data_and_headers_null_message() -> None:
     with patch("mito_ai.utils.open_ai_utils.get_user_field") as mock_get_user_field:
         mock_get_user_field.side_effect = ["test@example.com", "user123"]

-[10 removed lines truncated in source]
-        assert data["user_input"] == ""
+        data, _ = _prepare_request_data_and_headers(
+            last_message_content=None,
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Verify empty string is used for null message
+        assert data["user_input"] == ""

 def test_prepare_request_data_and_headers_caches_user_info() -> None:
     """Test that user info is cached after first call"""
@@ -125,28 +124,27 @@ def test_prepare_request_data_and_headers_caches_user_info() -> None:

         mock_get_user_field.side_effect = ["test@example.com", "user123"]

-[24 removed lines truncated in source]
-        assert data1["user_id"] == data2["user_id"] == "user123"
+        # First call
+        data1, _ = _prepare_request_data_and_headers(
+            last_message_content="test",
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Second call
+        data2, _ = _prepare_request_data_and_headers(
+            last_message_content="test",
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Verify get_user_field was only called twice (once for email, once for user_id)
+        assert mock_get_user_field.call_count == 2
+
+        # Verify both calls return same user info
+        assert data1["email"] == data2["email"] == "test@example.com"
+        assert data1["user_id"] == data2["user_id"] == "user123"
mito_ai/tests/providers/test_anthropic_client.py

@@ -3,11 +3,10 @@

 import pytest
 from mito_ai.anthropic_client import get_anthropic_system_prompt_and_messages, get_anthropic_system_prompt_and_messages_with_caching, add_cache_control_to_message, extract_and_parse_anthropic_json_response, AnthropicClient
-from mito_ai.utils.anthropic_utils import FAST_ANTHROPIC_MODEL
 from anthropic.types import Message, TextBlock, ToolUseBlock, Usage, ToolUseBlock, Message, Usage, TextBlock
 from openai.types.chat import ChatCompletionMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionSystemMessageParam
 from mito_ai.completions.models import MessageType
-from unittest.mock import patch
+from unittest.mock import MagicMock, patch
 import anthropic
 from typing import List, Dict, cast

@@ -233,24 +232,25 @@ def test_tool_use_without_agent_response():
     assert "No valid AgentResponse format found" in str(exc_info.value)

 CUSTOM_MODEL = "smart-anthropic-model"
-@pytest.mark.parametrize("message_type
-[7 removed lines truncated in source]
+@pytest.mark.parametrize("message_type", [
+    MessageType.CHAT,
+    MessageType.SMART_DEBUG,
+    MessageType.CODE_EXPLAIN,
+    MessageType.AGENT_EXECUTION,
+    MessageType.AGENT_AUTO_ERROR_FIXUP,
+    MessageType.INLINE_COMPLETION,
+    MessageType.CHAT_NAME_GENERATION,
 ])
 @pytest.mark.asyncio
-async def
+async def test_model_selection_uses_passed_model(message_type):
     """
-    Tests that the
+    Tests that the model passed to the client is used as-is.
+    Model selection based on message type is now handled by ProviderManager.
     """
     client = AnthropicClient(api_key="test_key")

-    # Mock the messages.create method directly
-    with patch.object(client.client.messages, 'create') as mock_create:  # type: ignore
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
         # Create a mock response
         mock_response = Message(
             id="test_id",
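
The patch target moving from client.client.messages to client.client.beta.messages reflects the AnthropicClient now calling the SDK's beta messages surface. For reference, the raw anthropic SDK call that the mocked path corresponds to is roughly the following; the max_tokens value and the absence of extra beta flags are assumptions, since mito_ai/anthropic_client.py itself is not reproduced in this section.

# Rough shape of the underlying SDK call being mocked; not mito_ai's actual request code.
import anthropic

sdk = anthropic.Anthropic(api_key="test_key")
message = sdk.beta.messages.create(
    model="claude-haiku-4-5-20251001",  # model string taken from the tests below
    max_tokens=64,                      # assumed value; the parameter is required by the API
    messages=[{"role": "user", "content": "Test message"}],
)
print(message.content)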
@@ -269,10 +269,168 @@ async def test_model_selection_based_on_message_type(message_type, expected_mode
             response_format_info=None
         )

-        # Verify that create was called with the
+        # Verify that create was called with the model that was passed (not overridden)
         mock_create.assert_called_once()
         call_args = mock_create.call_args
-        assert call_args[1]['model'] ==
+        assert call_args[1]['model'] == CUSTOM_MODEL
+
+@pytest.mark.asyncio
+async def test_anthropic_client_uses_fast_model_from_provider_manager_without_override():
+    """Test that Anthropic client uses the fast model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock response
+        mock_response = Message(
+            id="test_id",
+            role="assistant",
+            content=[TextBlock(type="text", text="test")],
+            model='anthropic-model-we-do-not-check',
+            type="message",
+            usage=Usage(input_tokens=0, output_tokens=0)
+        )
+        mock_create.return_value = mock_response
+
+        # Use a fast model that would be selected by ProviderManager
+        fast_model = get_fast_model_for_selected_model("claude-sonnet-4-5-20250929")
+
+        await client.request_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=fast_model,
+            message_type=MessageType.CHAT,
+            response_format_info=None
+        )
+
+        # Verify that create was called with the fast model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == fast_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_uses_smartest_model_from_provider_manager_without_override():
+    """Test that Anthropic client uses the smartest model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method directly (we now use beta API)
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock response
+        mock_response = Message(
+            id="test_id",
+            role="assistant",
+            content=[TextBlock(type="text", text="test")],
+            model='anthropic-model-we-do-not-check',
+            type="message",
+            usage=Usage(input_tokens=0, output_tokens=0)
+        )
+        mock_create.return_value = mock_response
+
+        # Use a smartest model that would be selected by ProviderManager
+        smartest_model = get_smartest_model_for_selected_model("claude-haiku-4-5-20251001")
+
+        await client.request_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=smartest_model,
+            message_type=MessageType.CHAT,
+            response_format_info=None
+        )
+
+        # Verify that create was called with the smartest model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == smartest_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_stream_uses_fast_model_from_provider_manager_without_override():
+    """Test that Anthropic client stream_completions uses the fast model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method for streaming
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock stream response
+        class MockStreamChunk:
+            def __init__(self, chunk_type, text=""):
+                self.type = chunk_type
+                if chunk_type == "content_block_delta":
+                    self.delta = MagicMock()
+                    self.delta.type = "text_delta"
+                    self.delta.text = text
+
+        mock_stream = [
+            MockStreamChunk("content_block_delta", "test"),
+            MockStreamChunk("message_stop")
+        ]
+        mock_create.return_value = iter(mock_stream)
+
+        # Use a fast model that would be selected by ProviderManager
+        fast_model = get_fast_model_for_selected_model("claude-sonnet-4-5-20250929")
+
+        reply_chunks = []
+        def mock_reply(chunk):
+            reply_chunks.append(chunk)
+
+        await client.stream_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=fast_model,
+            message_id="test-id",
+            message_type=MessageType.CHAT,
+            reply_fn=mock_reply
+        )
+
+        # Verify that create was called with the fast model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == fast_model
+
+@pytest.mark.asyncio
+async def test_anthropic_client_stream_uses_smartest_model_from_provider_manager_without_override():
+    """Test that Anthropic client stream_completions uses the smartest model passed from ProviderManager without internal override."""
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    client = AnthropicClient(api_key="test_key")
+
+    # Mock the beta.messages.create method for streaming
+    with patch.object(client.client.beta.messages, 'create') as mock_create:  # type: ignore
+        # Create a mock stream response
+        class MockStreamChunk:
+            def __init__(self, chunk_type, text=""):
+                self.type = chunk_type
+                if chunk_type == "content_block_delta":
+                    self.delta = MagicMock()
+                    self.delta.type = "text_delta"
+                    self.delta.text = text
+
+        mock_stream = [
+            MockStreamChunk("content_block_delta", "test"),
+            MockStreamChunk("message_stop")
+        ]
+        mock_create.return_value = iter(mock_stream)
+
+        # Use a smartest model that would be selected by ProviderManager
+        smartest_model = get_smartest_model_for_selected_model("claude-haiku-4-5-20251001")
+
+        reply_chunks = []
+        def mock_reply(chunk):
+            reply_chunks.append(chunk)
+
+        await client.stream_completions(
+            messages=[{"role": "user", "content": "Test message"}],
+            model=smartest_model,
+            message_id="test-id",
+            message_type=MessageType.CHAT,
+            reply_fn=mock_reply
+        )
+
+        # Verify that create was called with the smartest model that was passed (not overridden)
+        mock_create.assert_called_once()
+        call_args = mock_create.call_args
+        assert call_args[1]['model'] == smartest_model


 # Caching Tests
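
The get_fast_model_for_selected_model and get_smartest_model_for_selected_model helpers imported above live in the new mito_ai/utils/model_utils.py (+257 lines), which is not reproduced in this section; the tests only establish that whatever string they return is passed through to the client untouched. A usage sketch, with the caveat that the actual model mapping is defined in model_utils and not shown here:

# Usage sketch; the fast/smart mapping itself lives in mito_ai/utils/model_utils.py (not shown here).
from mito_ai.utils.model_utils import (
    get_fast_model_for_selected_model,
    get_smartest_model_for_selected_model,
)

selected = "claude-sonnet-4-5-20250929"
fast_model = get_fast_model_for_selected_model(selected)          # cheaper model, e.g. for chat-name generation
smartest_model = get_smartest_model_for_selected_model(selected)  # heavier model for the hardest requests
# The AnthropicClient tests above assert that whichever string these return reaches
# client.client.beta.messages.create(model=...) unchanged.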