mito-ai 0.1.57__py3-none-any.whl → 0.1.58__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +16 -22
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +24 -14
- mito_ai/chart_wizard/handlers.py +78 -17
- mito_ai/chart_wizard/urls.py +8 -5
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
- mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
- mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
- mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
- mito_ai/completions/completion_handlers/completion_handler.py +3 -5
- mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
- mito_ai/completions/completion_handlers/scratchpad_result_handler.py +6 -8
- mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
- mito_ai/completions/completion_handlers/utils.py +3 -7
- mito_ai/completions/handlers.py +32 -22
- mito_ai/completions/message_history.py +8 -10
- mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
- mito_ai/constants.py +8 -1
- mito_ai/enterprise/__init__.py +1 -1
- mito_ai/enterprise/litellm_client.py +137 -0
- mito_ai/log/handlers.py +1 -1
- mito_ai/openai_client.py +10 -90
- mito_ai/{completions/providers.py → provider_manager.py} +157 -53
- mito_ai/settings/enterprise_handler.py +26 -0
- mito_ai/settings/urls.py +2 -0
- mito_ai/streamlit_conversion/agent_utils.py +2 -30
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
- mito_ai/streamlit_preview/handlers.py +6 -3
- mito_ai/streamlit_preview/urls.py +5 -3
- mito_ai/tests/message_history/test_generate_short_chat_name.py +72 -28
- mito_ai/tests/providers/test_anthropic_client.py +174 -16
- mito_ai/tests/providers/test_azure.py +13 -13
- mito_ai/tests/providers/test_capabilities.py +14 -17
- mito_ai/tests/providers/test_gemini_client.py +14 -13
- mito_ai/tests/providers/test_model_resolution.py +145 -89
- mito_ai/tests/providers/test_openai_client.py +209 -13
- mito_ai/tests/providers/test_provider_limits.py +5 -5
- mito_ai/tests/providers/test_providers.py +229 -51
- mito_ai/tests/providers/test_retry_logic.py +13 -22
- mito_ai/tests/providers/utils.py +4 -4
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
- mito_ai/tests/test_enterprise_mode.py +162 -0
- mito_ai/tests/test_model_utils.py +271 -0
- mito_ai/utils/anthropic_utils.py +8 -6
- mito_ai/utils/gemini_utils.py +0 -3
- mito_ai/utils/litellm_utils.py +84 -0
- mito_ai/utils/model_utils.py +178 -0
- mito_ai/utils/open_ai_utils.py +0 -8
- mito_ai/utils/provider_utils.py +6 -28
- mito_ai/utils/telemetry_utils.py +14 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js +671 -75
- mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +1 -0
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js +17 -17
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +1 -1
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/METADATA +2 -1
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/RECORD +86 -79
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +0 -1
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.58.dist-info}/licenses/LICENSE +0 -0
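The headline change in this release is the rename of `mito_ai/completions/providers.py` to `mito_ai/provider_manager.py`. Judging from the updated tests below, the provider entry point is now a `ProviderManager` that carries the selected model as instance state via `set_selected_model`, instead of accepting a `model=` argument on every `request_completions`/`stream_completions` call. A minimal usage sketch, inferred from the tests rather than from any package documentation:

```python
# Sketch only: the API surface shown here (ProviderManager, set_selected_model,
# request_completions) is inferred from the 0.1.58 test diffs below.
import asyncio
from traitlets.config import Config
from mito_ai.provider_manager import ProviderManager
from mito_ai.completions.models import MessageType

async def main() -> None:
    config = Config()
    config.ProviderManager = Config()  # config section matches the new class name

    llm = ProviderManager(config=config)
    # 0.1.57 passed model= on each completion call; 0.1.58 sets it once here.
    llm.set_selected_model("gpt-4.1")

    completion = await llm.request_completions(
        message_type=MessageType.CHAT,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(completion)

asyncio.run(main())
```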
mito_ai/tests/providers/test_providers.py:

```diff
@@ -9,7 +9,7 @@ from unittest.mock import patch, MagicMock, AsyncMock
 from mito_ai.tests.providers.utils import mock_azure_openai_client, mock_openai_client, patch_server_limits
 import pytest
 from traitlets.config import Config
-from mito_ai.
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.models import (
     MessageType,
     AICapabilities,
@@ -24,16 +24,16 @@ FAKE_API_KEY = "sk-1234567890"
 
 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config
 
 @pytest.fixture(autouse=True)
 def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
     for var in [
-        "OPENAI_API_KEY", "
+        "OPENAI_API_KEY", "ANTHROPIC_API_KEY",
         "GEMINI_API_KEY", "OLLAMA_MODEL",
         "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_MODEL"
     ]:
@@ -49,18 +49,18 @@ def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
         "name": "openai",
         "env_vars": {"OPENAI_API_KEY": FAKE_API_KEY},
         "constants": {"OPENAI_API_KEY": FAKE_API_KEY},
-        "model": "gpt-
-        "mock_patch": "mito_ai.
+        "model": "gpt-4.1",
+        "mock_patch": "mito_ai.provider_manager.OpenAIClient",
         "mock_method": "request_completions",
         "provider_name": "OpenAI with user key",
         "key_type": "user"
     },
     {
         "name": "claude",
-        "env_vars": {"
-        "constants": {"
-        "model": "claude-
-        "mock_patch": "mito_ai.
+        "env_vars": {"ANTHROPIC_API_KEY": "claude-key"},
+        "constants": {"ANTHROPIC_API_KEY": "claude-key", "OPENAI_API_KEY": None},
+        "model": "claude-sonnet-4-5-20250929",
+        "mock_patch": "mito_ai.provider_manager.AnthropicClient",
         "mock_method": "request_completions",
         "provider_name": "Claude",
         "key_type": "claude"
@@ -69,8 +69,8 @@ def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
         "name": "gemini",
         "env_vars": {"GEMINI_API_KEY": "gemini-key"},
         "constants": {"GEMINI_API_KEY": "gemini-key", "OPENAI_API_KEY": None},
-        "model": "gemini-
-        "mock_patch": "mito_ai.
+        "model": "gemini-3-flash-preview",
+        "mock_patch": "mito_ai.provider_manager.GeminiClient",
         "mock_method": "request_completions",
         "provider_name": "Gemini",
         "key_type": "gemini"
@@ -79,8 +79,8 @@ def reset_env_vars(monkeypatch: pytest.MonkeyPatch) -> None:
         "name": "azure",
         "env_vars": {"AZURE_OPENAI_API_KEY": "azure-key"},
         "constants": {"AZURE_OPENAI_API_KEY": "azure-key", "OPENAI_API_KEY": None},
-        "model": "gpt-
-        "mock_patch": "mito_ai.
+        "model": "gpt-4.1",
+        "mock_patch": "mito_ai.provider_manager.OpenAIClient",
         "mock_method": "request_completions",
         "provider_name": "Azure OpenAI",
         "key_type": "azure"
@@ -113,15 +113,15 @@ async def test_completion_request(
     mock_client.stream_completions = AsyncMock(return_value="Test completion")
 
     with patch(provider_config_data["mock_patch"], return_value=mock_client):
-        llm =
+        llm = ProviderManager(config=provider_config)
+        llm.set_selected_model(provider_config_data["model"])
         messages: List[ChatCompletionMessageParam] = [
             {"role": "user", "content": "Test message"}
         ]
 
         completion = await llm.request_completions(
             message_type=MessageType.CHAT,
-            messages=messages
-            model=provider_config_data["model"]
+            messages=messages
         )
 
         assert completion == "Test completion"
@@ -133,18 +133,18 @@ async def test_completion_request(
         "name": "openai",
         "env_vars": {"OPENAI_API_KEY": FAKE_API_KEY},
         "constants": {"OPENAI_API_KEY": FAKE_API_KEY},
-        "model": "gpt-
-        "mock_patch": "mito_ai.
+        "model": "gpt-4.1",
+        "mock_patch": "mito_ai.provider_manager.OpenAIClient",
         "mock_method": "stream_completions",
         "provider_name": "OpenAI with user key",
         "key_type": "user"
     },
     {
         "name": "claude",
-        "env_vars": {"
-        "constants": {"
-        "model": "claude-
-        "mock_patch": "mito_ai.
+        "env_vars": {"ANTHROPIC_API_KEY": "claude-key"},
+        "constants": {"ANTHROPIC_API_KEY": "claude-key", "OPENAI_API_KEY": None},
+        "model": "claude-sonnet-4-5-20250929",
+        "mock_patch": "mito_ai.provider_manager.AnthropicClient",
         "mock_method": "stream_completions",
         "provider_name": "Claude",
         "key_type": "claude"
@@ -153,8 +153,8 @@ async def test_completion_request(
         "name": "gemini",
         "env_vars": {"GEMINI_API_KEY": "gemini-key"},
         "constants": {"GEMINI_API_KEY": "gemini-key", "OPENAI_API_KEY": None},
-        "model": "gemini-
-        "mock_patch": "mito_ai.
+        "model": "gemini-3-flash-preview",
+        "mock_patch": "mito_ai.provider_manager.GeminiClient",
         "mock_method": "stream_completions",
         "provider_name": "Gemini",
         "key_type": "gemini"
@@ -188,7 +188,8 @@ async def test_stream_completion_parameterized(
     mock_client.stream_response = AsyncMock(return_value="Test completion") # For Claude
 
     with patch(provider_config_data["mock_patch"], return_value=mock_client):
-        llm =
+        llm = ProviderManager(config=provider_config)
+        llm.set_selected_model(provider_config_data["model"])
         messages: List[ChatCompletionMessageParam] = [
             {"role": "user", "content": "Test message"}
         ]
@@ -200,7 +201,6 @@ async def test_stream_completion_parameterized(
         completion = await llm.stream_completions(
             message_type=MessageType.CHAT,
             messages=messages,
-            model=provider_config_data["model"],
             message_id="test-id",
             thread_id="test-thread",
             reply_fn=mock_reply
@@ -217,33 +217,33 @@ def test_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", "invalid-key")
     mock_client = MagicMock()
     mock_client.capabilities = AICapabilities(
-        configuration={"model": "gpt-
+        configuration={"model": "gpt-4.1"},
         provider="OpenAI with user key",
         type="ai_capabilities"
     )
     mock_client.key_type = "user"
     mock_client.request_completions.side_effect = Exception("API error")
 
-    with patch("mito_ai.
-        llm =
+    with patch("mito_ai.provider_manager.OpenAIClient", return_value=mock_client):
+        llm = ProviderManager(config=provider_config)
         assert llm.last_error is None # Error should be None until a request is made
 
 def test_claude_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config: Config) -> None:
-    monkeypatch.setenv("
-    monkeypatch.setattr("mito_ai.constants.
+    monkeypatch.setenv("ANTHROPIC_API_KEY", "invalid-key")
+    monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", "invalid-key")
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
 
     mock_client = MagicMock()
     mock_client.capabilities = AICapabilities(
-        configuration={"model": "claude-
+        configuration={"model": "claude-sonnet-4-5-20250929"},
         provider="Claude",
         type="ai_capabilities"
     )
     mock_client.key_type = "claude"
     mock_client.request_completions.side_effect = Exception("API error")
 
-    with patch("mito_ai.
-        llm =
+    with patch("mito_ai.provider_manager.AnthropicClient", return_value=mock_client):
+        llm = ProviderManager(config=provider_config)
         assert llm.last_error is None # Error should be None until a request is made
 
 
@@ -251,21 +251,21 @@ def test_claude_error_handling(monkeypatch: pytest.MonkeyPatch, provider_config:
 @pytest.mark.parametrize("mito_server_config", [
     {
         "name": "openai_fallback",
-        "model": "gpt-
+        "model": "gpt-4.1",
         "mock_function": "mito_ai.openai_client.get_ai_completion_from_mito_server",
         "provider_name": "Mito server",
         "key_type": "mito_server"
     },
     {
         "name": "claude_fallback",
-        "model": "claude-
+        "model": "claude-sonnet-4-5-20250929",
         "mock_function": "mito_ai.anthropic_client.get_anthropic_completion_from_mito_server",
         "provider_name": "Claude",
         "key_type": "claude"
     },
     {
         "name": "gemini_fallback",
-        "model": "gemini-
+        "model": "gemini-3-flash-preview",
         "mock_function": "mito_ai.gemini_client.get_gemini_completion_from_mito_server",
         "provider_name": "Gemini",
         "key_type": "gemini"
@@ -280,10 +280,9 @@ async def test_mito_server_fallback_completion_request(
     """Test that completion requests fallback to Mito server when no API keys are set."""
     # Clear all API keys to force Mito server fallback
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
-    monkeypatch.setattr("mito_ai.constants.
+    monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", None)
     monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
     monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
-    provider_config.OpenAIProvider.api_key = None
 
     # Mock the appropriate Mito server function
     with patch(mito_server_config["mock_function"], new_callable=AsyncMock) as mock_mito_function:
@@ -294,12 +293,12 @@ async def test_mito_server_fallback_completion_request(
         ]
 
         with patch_server_limits():
-            llm =
+            llm = ProviderManager(config=provider_config)
+            llm.set_selected_model(mito_server_config["model"])
 
             completion = await llm.request_completions(
                 message_type=MessageType.CHAT,
-                messages=messages
-                model=mito_server_config["model"]
+                messages=messages
             )
 
             assert completion == "Mito server response"
@@ -309,21 +308,21 @@ async def test_mito_server_fallback_completion_request(
 @pytest.mark.parametrize("mito_server_config", [
     {
         "name": "openai_fallback",
-        "model": "gpt-
+        "model": "gpt-4.1",
         "mock_function": "mito_ai.openai_client.stream_ai_completion_from_mito_server",
         "provider_name": "Mito server",
         "key_type": "mito_server"
     },
     {
         "name": "claude_fallback",
-        "model": "claude-
+        "model": "claude-sonnet-4-5-20250929",
         "mock_function": "mito_ai.anthropic_client.stream_anthropic_completion_from_mito_server",
         "provider_name": "Claude",
         "key_type": "claude"
     },
     {
         "name": "gemini_fallback",
-        "model": "gemini-
+        "model": "gemini-3-flash-preview",
         "mock_function": "mito_ai.gemini_client.stream_gemini_completion_from_mito_server",
         "provider_name": "Gemini",
         "key_type": "gemini"
@@ -338,10 +337,9 @@ async def test_mito_server_fallback_stream_completion(
     """Test that stream completions fallback to Mito server when no API keys are set."""
     # Clear all API keys to force Mito server fallback
     monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", None)
-    monkeypatch.setattr("mito_ai.constants.
+    monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", None)
     monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
     monkeypatch.setattr("mito_ai.enterprise.utils.is_azure_openai_configured", lambda: False)
-    provider_config.OpenAIProvider.api_key = None
 
     # Create an async generator that yields chunks for streaming
     async def mock_stream_generator():
@@ -364,12 +362,12 @@ async def test_mito_server_fallback_stream_completion(
         # Apply patch_server_limits for all cases, not just openai_fallback
         # Also patch update_mito_server_quota where it's actually used in openai_client
         with patch_server_limits(), patch("mito_ai.openai_client.update_mito_server_quota", MagicMock(return_value=None)):
-            llm =
+            llm = ProviderManager(config=provider_config)
+            llm.set_selected_model(mito_server_config["model"])
 
             completion = await llm.stream_completions(
                 message_type=MessageType.CHAT,
                 messages=messages,
-                model=mito_server_config["model"],
                 message_id="test-id",
                 thread_id="test-thread",
                 reply_fn=mock_reply
@@ -379,4 +377,184 @@ async def test_mito_server_fallback_stream_completion(
         mock_mito_stream.assert_called_once()
         # Verify that reply chunks were generated
         assert len(reply_chunks) > 0
-        assert isinstance(reply_chunks[0], CompletionReply)
+        assert isinstance(reply_chunks[0], CompletionReply)
+
+
+# Fast and Smartest Model Tests
+@pytest.mark.asyncio
+async def test_provider_manager_uses_fast_model_for_request_completions(
+    monkeypatch: pytest.MonkeyPatch,
+    provider_config: Config
+) -> None:
+    """Test that ProviderManager correctly sets and uses fast model for request_completions when requested."""
+    # Set up environment variables to ensure OpenAI provider is used
+    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    # Create mock client
+    mock_client = MagicMock()
+    mock_client.capabilities = AICapabilities(
+        configuration={"model": "gpt-4.1"},
+        provider="OpenAI with user key",
+        type="ai_capabilities"
+    )
+    mock_client.request_completions = AsyncMock(return_value="Test completion")
+
+    messages: List[ChatCompletionMessageParam] = [
+        {"role": "user", "content": "Test message"}
+    ]
+
+    with patch("mito_ai.provider_manager.OpenAIClient", return_value=mock_client):
+        provider = ProviderManager(config=provider_config)
+        provider.set_selected_model("gpt-5.2")
+
+        await provider.request_completions(
+            message_type=MessageType.CHAT,
+            messages=messages,
+            use_fast_model=True
+        )
+
+        # Verify that request_completions was called with the fast model
+        mock_client.request_completions.assert_called_once()
+        call_args = mock_client.request_completions.call_args
+        expected_fast_model = get_fast_model_for_selected_model("gpt-5.2")
+        assert call_args[1]['model'] == expected_fast_model
+
+@pytest.mark.asyncio
+async def test_provider_manager_uses_smartest_model_for_request_completions(
+    monkeypatch: pytest.MonkeyPatch,
+    provider_config: Config
+) -> None:
+    """Test that ProviderManager correctly sets and uses smartest model for request_completions when requested."""
+    # Set up environment variables to ensure OpenAI provider is used
+    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    # Create mock client
+    mock_client = MagicMock()
+    mock_client.capabilities = AICapabilities(
+        configuration={"model": "gpt-4.1"},
+        provider="OpenAI with user key",
+        type="ai_capabilities"
+    )
+    mock_client.request_completions = AsyncMock(return_value="Test completion")
+
+    messages: List[ChatCompletionMessageParam] = [
+        {"role": "user", "content": "Test message"}
+    ]
+
+    with patch("mito_ai.provider_manager.OpenAIClient", return_value=mock_client):
+        provider = ProviderManager(config=provider_config)
+        provider.set_selected_model("gpt-4.1")
+
+        await provider.request_completions(
+            message_type=MessageType.CHAT,
+            messages=messages,
+            use_smartest_model=True
+        )
+
+        # Verify that request_completions was called with the smartest model
+        mock_client.request_completions.assert_called_once()
+        call_args = mock_client.request_completions.call_args
+        expected_smartest_model = get_smartest_model_for_selected_model("gpt-4.1")
+        assert call_args[1]['model'] == expected_smartest_model
+
+@pytest.mark.asyncio
+async def test_provider_manager_uses_fast_model_for_stream_completions(
+    monkeypatch: pytest.MonkeyPatch,
+    provider_config: Config
+) -> None:
+    """Test that ProviderManager correctly sets and uses fast model for stream_completions when requested."""
+    # Set up environment variables to ensure OpenAI provider is used
+    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+    from mito_ai.utils.model_utils import get_fast_model_for_selected_model
+
+    # Create mock client
+    mock_client = MagicMock()
+    mock_client.capabilities = AICapabilities(
+        configuration={"model": "gpt-4.1"},
+        provider="OpenAI with user key",
+        type="ai_capabilities"
+    )
+    mock_client.stream_completions = AsyncMock(return_value="Test completion")
+
+    messages: List[ChatCompletionMessageParam] = [
+        {"role": "user", "content": "Test message"}
+    ]
+
+    reply_chunks = []
+    def mock_reply(chunk):
+        reply_chunks.append(chunk)
+
+    with patch("mito_ai.provider_manager.OpenAIClient", return_value=mock_client):
+        provider = ProviderManager(config=provider_config)
+        provider.set_selected_model("gpt-5.2")
+
+        await provider.stream_completions(
+            message_type=MessageType.CHAT,
+            messages=messages,
+            message_id="test-id",
+            thread_id="test-thread",
+            reply_fn=mock_reply,
+            use_fast_model=True
+        )
+
+        # Verify that stream_completions was called with the fast model
+        mock_client.stream_completions.assert_called_once()
+        call_args = mock_client.stream_completions.call_args
+        expected_fast_model = get_fast_model_for_selected_model("gpt-5.2")
+        assert call_args[1]['model'] == expected_fast_model
+
+@pytest.mark.asyncio
+async def test_provider_manager_uses_smartest_model_for_stream_completions(
+    monkeypatch: pytest.MonkeyPatch,
+    provider_config: Config
+) -> None:
+    """Test that ProviderManager correctly sets and uses smartest model for stream_completions when requested."""
+    # Set up environment variables to ensure OpenAI provider is used
+    monkeypatch.setenv("OPENAI_API_KEY", FAKE_API_KEY)
+    monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
+
+    from mito_ai.utils.model_utils import get_smartest_model_for_selected_model
+
+    # Create mock client
+    mock_client = MagicMock()
+    mock_client.capabilities = AICapabilities(
+        configuration={"model": "gpt-4.1"},
+        provider="OpenAI with user key",
+        type="ai_capabilities"
+    )
+    mock_client.stream_completions = AsyncMock(return_value="Test completion")
+
+    messages: List[ChatCompletionMessageParam] = [
+        {"role": "user", "content": "Test message"}
+    ]
+
+    reply_chunks = []
+    def mock_reply(chunk):
+        reply_chunks.append(chunk)
+
+    with patch("mito_ai.provider_manager.OpenAIClient", return_value=mock_client):
+        provider = ProviderManager(config=provider_config)
+        provider.set_selected_model("gpt-4.1")
+
+        await provider.stream_completions(
+            message_type=MessageType.CHAT,
+            messages=messages,
+            message_id="test-id",
+            thread_id="test-thread",
+            reply_fn=mock_reply,
+            use_smartest_model=True
+        )
+
+        # Verify that stream_completions was called with the smartest model
+        mock_client.stream_completions.assert_called_once()
+        call_args = mock_client.stream_completions.call_args
+        expected_smartest_model = get_smartest_model_for_selected_model("gpt-4.1")
+        assert call_args[1]['model'] == expected_smartest_model
```
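The new "Fast and Smartest Model" tests above exercise `use_fast_model`/`use_smartest_model` flags on `request_completions` and `stream_completions`, resolved through helpers in the new `mito_ai/utils/model_utils.py`. A small sketch of that surface as it appears in the tests; which concrete model each helper returns is defined inside `model_utils.py` and is not visible in this diff:

```python
# Sketch only: helper names come from the tests above; the mapping they
# implement (selected model -> fast/smartest sibling) is an internal detail.
from mito_ai.utils.model_utils import (
    get_fast_model_for_selected_model,
    get_smartest_model_for_selected_model,
)

selected = "gpt-4.1"
print(get_fast_model_for_selected_model(selected))      # cheaper/faster tier
print(get_smartest_model_for_selected_model(selected))  # strongest tier
```

Per the test assertions, setting one of these flags swaps the resolved model in for that single call while `set_selected_model` continues to govern the default.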
mito_ai/tests/providers/test_retry_logic.py:

```diff
@@ -4,7 +4,7 @@
 import pytest
 import asyncio
 from unittest.mock import AsyncMock, MagicMock, patch
-from mito_ai.
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.models import MessageType, CompletionError
 from mito_ai.utils.mito_server_utils import ProviderCompletionException
 from mito_ai.tests.providers.utils import mock_openai_client, patch_server_limits
@@ -14,9 +14,9 @@ FAKE_API_KEY = "sk-1234567890"
 
 @pytest.fixture
 def provider_config() -> Config:
-    """Create a proper Config object for the
+    """Create a proper Config object for the ProviderManager."""
     config = Config()
-    config.
+    config.ProviderManager = Config()
     config.OpenAIClient = Config()
     return config
 
@@ -32,7 +32,7 @@ def mock_sleep():
         yield mock
 
 class TestRetryLogic:
-    """Test retry logic in
+    """Test retry logic in ProviderManager.request_completions."""
 
     @pytest.mark.parametrize("attempts_before_success,max_retries,expected_call_count", [
         (0, 3, 1), # Success on first try
@@ -64,13 +64,12 @@ class TestRetryLogic:
         ):
             mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)
 
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # Test successful completion
             result = await provider.request_completions(
                 message_type=MessageType.CHAT,
                 messages=mock_messages,
-                model="gpt-4o-mini",
                 max_retries=max_retries
             )
 
@@ -114,14 +113,13 @@ class TestRetryLogic:
         ):
             mock_client.return_value.request_completions = AsyncMock(side_effect=test_exception)
 
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # Test failure after all retries
             with pytest.raises(exception_type):
                 await provider.request_completions(
                     message_type=MessageType.CHAT,
                     messages=mock_messages,
-                    model="gpt-4o-mini",
                     max_retries=max_retries
                 )
 
@@ -158,13 +156,12 @@ class TestRetryLogic:
         ):
             mock_client.return_value.request_completions = AsyncMock(side_effect=side_effects)
 
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # Should succeed after 3 retries with mixed exceptions
             result = await provider.request_completions(
                 message_type=MessageType.CHAT,
                 messages=mock_messages,
-                model="gpt-4o-mini",
                 max_retries=3
             )
 
@@ -189,7 +186,7 @@ class TestRetryLogic:
             patch_server_limits(),
             mock_openai_client() as mock_client
         ):
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # First request fails to set an error
             mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("First error"))
@@ -198,7 +195,6 @@ class TestRetryLogic:
                 await provider.request_completions(
                     message_type=MessageType.CHAT,
                     messages=mock_messages,
-                    model="gpt-4o-mini",
                     max_retries=0 # No retries to fail quickly
                 )
 
@@ -211,7 +207,6 @@ class TestRetryLogic:
             result = await provider.request_completions(
                 message_type=MessageType.CHAT,
                 messages=mock_messages,
-                model="gpt-4o-mini",
                 max_retries=3
             )
 
@@ -236,14 +231,13 @@ class TestRetryLogic:
         ):
             mock_client.return_value.request_completions = AsyncMock(side_effect=Exception("Test error"))
 
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # Should fail immediately with no retries
             with pytest.raises(Exception):
                 await provider.request_completions(
                     message_type=MessageType.CHAT,
                     messages=mock_messages,
-                    model="gpt-4o-mini",
                     max_retries=0
                 )
 
@@ -272,14 +266,13 @@ class TestRetryLogic:
         ):
             mock_client.return_value.request_completions = AsyncMock(side_effect=provider_exception)
 
-            provider =
+            provider = ProviderManager(config=provider_config)
 
             # Should fail after retries
             with pytest.raises(ProviderCompletionException):
                 await provider.request_completions(
                     message_type=MessageType.CHAT,
                     messages=mock_messages,
-                    model="gpt-4o-mini",
                     max_retries=2
                 )
 
@@ -326,9 +319,9 @@ class TestRetryLogic:
         monkeypatch.setattr("mito_ai.constants.OPENAI_API_KEY", FAKE_API_KEY)
 
         # Clear other API keys to ensure OpenAI path is used
-        monkeypatch.delenv("
+        monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
         monkeypatch.delenv("GEMINI_API_KEY", raising=False)
-        monkeypatch.setattr("mito_ai.constants.
+        monkeypatch.setattr("mito_ai.constants.ANTHROPIC_API_KEY", None)
         monkeypatch.setattr("mito_ai.constants.GEMINI_API_KEY", None)
 
         # Enable print logs to capture telemetry output
@@ -341,7 +334,7 @@ class TestRetryLogic:
         mock_client.key_type = "user"
 
         # Create the provider and set the mock client
-        provider =
+        provider = ProviderManager(config=provider_config)
         provider._openai_client = mock_client
 
         # Determine if we expect success or failure
@@ -352,7 +345,6 @@ class TestRetryLogic:
             result = await provider.request_completions(
                 message_type=MessageType.CHAT,
                 messages=mock_messages,
-                model="gpt-4o-mini",
                 max_retries=max_retries
             )
 
@@ -365,7 +357,6 @@ class TestRetryLogic:
             await provider.request_completions(
                 message_type=MessageType.CHAT,
                 messages=mock_messages,
-                model="gpt-4o-mini",
                 max_retries=max_retries
             )
 
```
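The retry tests keep the same per-call `max_retries` contract after the rename: retries run inside `ProviderManager.request_completions`, and once they are exhausted the underlying exception (including `ProviderCompletionException` from the Mito server path) propagates to the caller. A caller-side sketch; `ask_with_retries` is illustrative and not part of the package:

```python
# Sketch only: llm is assumed to be a ProviderManager configured with
# set_selected_model, per the usage shown in the diffs above.
from mito_ai.completions.models import MessageType
from mito_ai.utils.mito_server_utils import ProviderCompletionException

async def ask_with_retries(llm, messages):
    try:
        return await llm.request_completions(
            message_type=MessageType.CHAT,
            messages=messages,
            max_retries=3,  # retried internally before the error surfaces
        )
    except ProviderCompletionException:
        # Raised only after all retries fail, as test_retry_logic.py asserts.
        return None
```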