mito-ai 0.1.57__py3-none-any.whl → 0.1.59__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mito_ai/__init__.py +19 -22
- mito_ai/_version.py +1 -1
- mito_ai/anthropic_client.py +24 -14
- mito_ai/chart_wizard/handlers.py +78 -17
- mito_ai/chart_wizard/urls.py +8 -5
- mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
- mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
- mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
- mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
- mito_ai/completions/completion_handlers/completion_handler.py +3 -5
- mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
- mito_ai/completions/completion_handlers/scratchpad_result_handler.py +6 -8
- mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
- mito_ai/completions/completion_handlers/utils.py +3 -7
- mito_ai/completions/handlers.py +32 -22
- mito_ai/completions/message_history.py +8 -10
- mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
- mito_ai/completions/prompt_builders/prompt_constants.py +2 -0
- mito_ai/constants.py +31 -2
- mito_ai/enterprise/__init__.py +1 -1
- mito_ai/enterprise/litellm_client.py +144 -0
- mito_ai/enterprise/utils.py +16 -2
- mito_ai/log/handlers.py +1 -1
- mito_ai/openai_client.py +36 -96
- mito_ai/provider_manager.py +420 -0
- mito_ai/settings/enterprise_handler.py +26 -0
- mito_ai/settings/urls.py +2 -0
- mito_ai/streamlit_conversion/agent_utils.py +2 -30
- mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
- mito_ai/streamlit_preview/handlers.py +6 -3
- mito_ai/streamlit_preview/urls.py +5 -3
- mito_ai/tests/message_history/test_generate_short_chat_name.py +103 -28
- mito_ai/tests/open_ai_utils_test.py +34 -36
- mito_ai/tests/providers/test_anthropic_client.py +174 -16
- mito_ai/tests/providers/test_azure.py +15 -15
- mito_ai/tests/providers/test_capabilities.py +14 -17
- mito_ai/tests/providers/test_gemini_client.py +14 -13
- mito_ai/tests/providers/test_model_resolution.py +145 -89
- mito_ai/tests/providers/test_openai_client.py +209 -13
- mito_ai/tests/providers/test_provider_limits.py +5 -5
- mito_ai/tests/providers/test_providers.py +229 -51
- mito_ai/tests/providers/test_retry_logic.py +13 -22
- mito_ai/tests/providers/utils.py +4 -4
- mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
- mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
- mito_ai/tests/test_constants.py +90 -0
- mito_ai/tests/test_enterprise_mode.py +217 -0
- mito_ai/tests/test_model_utils.py +362 -0
- mito_ai/utils/anthropic_utils.py +8 -6
- mito_ai/utils/gemini_utils.py +0 -3
- mito_ai/utils/litellm_utils.py +84 -0
- mito_ai/utils/model_utils.py +257 -0
- mito_ai/utils/open_ai_utils.py +29 -41
- mito_ai/utils/provider_utils.py +13 -29
- mito_ai/utils/telemetry_utils.py +14 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js +1059 -144
- mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js.map +1 -0
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js +17 -17
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js.map +1 -1
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +78 -78
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/METADATA +2 -1
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/RECORD +90 -83
- mito_ai/completions/providers.py +0 -284
- mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +0 -1
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
- {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/WHEEL +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/entry_points.txt +0 -0
- {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,257 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Tuple, Union, Optional, cast
+from mito_ai import constants
+from mito_ai.utils.version_utils import is_enterprise
+from mito_ai.enterprise.utils import is_abacus_configured
+
+# Model ordering: [fastest, ..., slowest] for each provider
+ANTHROPIC_MODEL_ORDER = [
+    "claude-haiku-4-5-20251001",  # Fastest
+    "claude-sonnet-4-5-20250929",  # Slower
+]
+
+OPENAI_MODEL_ORDER = [
+    "gpt-4.1",  # Fastest
+    "gpt-5",
+    "gpt-5.2",  # Slower
+]
+
+GEMINI_MODEL_ORDER = [
+    "gemini-3-flash-preview",  # Fastest
+    "gemini-3-pro-preview",  # Slower
+]
+
+# Standard model names (used when not in enterprise mode or when LiteLLM is not configured)
+STANDARD_MODELS = [
+    "gpt-4.1",
+    "gpt-5.2",
+    "claude-sonnet-4-5-20250929",
+    "claude-haiku-4-5-20251001",
+    "gemini-3-flash-preview",
+    "gemini-3-pro-preview",
+]
+
+
+def get_available_models() -> List[str]:
+    """
+    Determine which models are available based on enterprise mode and router configuration.
+
+    Priority order:
+    1. Abacus (if configured)
+    2. LiteLLM (if configured)
+    3. Standard models
+
+    Returns:
+        List of available model names with appropriate prefixes.
+    """
+    # Check if enterprise mode is enabled AND Abacus is configured (highest priority)
+    if is_abacus_configured():
+        # Return Abacus models (with Abacus/ prefix)
+        return constants.ABACUS_MODELS
+    # Check if enterprise mode is enabled AND LiteLLM is configured
+    elif is_enterprise() and constants.LITELLM_BASE_URL and constants.LITELLM_MODELS:
+        # Return LiteLLM models (with LiteLLM/provider/ prefix or legacy provider/ prefix)
+        return constants.LITELLM_MODELS
+    else:
+        # Return standard models
+        return STANDARD_MODELS
+
+
+def get_fast_model_for_selected_model(selected_model: str) -> str:
+    """
+    Get the fastest model for the client of the selected model.
+
+    - For standard providers, returns the first (fastest) model from that provider's order.
+    - For enterprise router models (Abacus/LiteLLM), finds the fastest available model by comparing indices.
+    """
+    # Check if this is an enterprise router model (has "/" or router prefix)
+    if "/" in selected_model or selected_model.lower().startswith(('abacus/', 'litellm/')):
+        # Find the fastest model from available models
+        available_models = get_available_models()
+        if not available_models:
+            return selected_model
+
+        # Filter to only router models (those with "/")
+        router_models = [model for model in available_models if "/" in model]
+        if not router_models:
+            return selected_model
+
+        # Extract provider/model pairs for ordering
+        pairs_with_indices = []
+        for model in router_models:
+            # Strip router prefix to get underlying model info
+            model_without_router = strip_router_prefix(model)
+
+            # For Abacus: model_without_router is just the model name (e.g., "gpt-4.1")
+            # For LiteLLM: model_without_router is "provider/model" (e.g., "openai/gpt-4.1")
+            if "/" in model_without_router:
+                # LiteLLM format: provider/model
+                pair = model_without_router.split("/", 1)
+            else:
+                # Abacus format: just model name, need to determine provider
+                provider = get_underlying_model_provider(model)
+                if provider:
+                    pair = [provider, model_without_router]
+                else:
+                    continue
+
+            index = get_model_order_index(pair)
+            if index is not None:
+                pairs_with_indices.append((model, index))
+
+        if not pairs_with_indices:
+            return selected_model
+
+        # Find the model with the minimum index (fastest model)
+        fastest_model, _ = min(pairs_with_indices, key=lambda x: x[1])
+
+        return fastest_model
+
+    # Standard provider logic - ensure we return a model from the same provider
+    model_lower = selected_model.lower()
+
+    # Determine provider and get fastest model
+    if model_lower.startswith('claude'):
+        return ANTHROPIC_MODEL_ORDER[0]
+    elif model_lower.startswith('gpt'):
+        return OPENAI_MODEL_ORDER[0]
+    elif model_lower.startswith('gemini'):
+        return GEMINI_MODEL_ORDER[0]
+
+    return selected_model
+
+def get_smartest_model_for_selected_model(selected_model: str) -> str:
+    """
+    Get the smartest model for the client of the selected model.
+
+    - For standard providers, returns the last (smartest) model from that provider's order.
+    - For enterprise router models (Abacus/LiteLLM), finds the smartest available model by comparing indices.
+    """
+    # Check if this is an enterprise router model (has "/" or router prefix)
+    if "/" in selected_model or selected_model.lower().startswith(('abacus/', 'litellm/')):
+        # Extract underlying provider from selected model
+        selected_provider = get_underlying_model_provider(selected_model)
+        if not selected_provider:
+            return selected_model
+
+        # Find the smartest model from available models
+        available_models = get_available_models()
+        if not available_models:
+            return selected_model
+
+        # Filter to only router models with the same underlying provider
+        router_models = []
+        for model in available_models:
+            if "/" in model:
+                model_provider = get_underlying_model_provider(model)
+                if model_provider == selected_provider:
+                    router_models.append(model)
+
+        if not router_models:
+            return selected_model
+
+        # Extract provider/model pairs for ordering
+        pairs_with_indices = []
+        for model in router_models:
+            # Strip router prefix to get underlying model info
+            model_without_router = strip_router_prefix(model)
+
+            # For Abacus: model_without_router is just the model name (e.g., "gpt-4.1")
+            # For LiteLLM: model_without_router is "provider/model" (e.g., "openai/gpt-4.1")
+            if "/" in model_without_router:
+                # LiteLLM format: provider/model
+                pair = model_without_router.split("/", 1)
+            else:
+                # Abacus format: just model name, provider already determined
+                pair = [selected_provider, model_without_router]
+
+            index = get_model_order_index(pair)
+            if index is not None:
+                pairs_with_indices.append((model, index))
+
+        if not pairs_with_indices:
+            return selected_model
+
+        # Find the model with the maximum index (smartest model)
+        smartest_model, _ = max(pairs_with_indices, key=lambda x: x[1])
+
+        return smartest_model
+
+    # Standard provider logic
+    model_lower = selected_model.lower()
+
+    # Determine provider and get smartest model
+    if model_lower.startswith('claude'):
+        return ANTHROPIC_MODEL_ORDER[-1]
+    elif model_lower.startswith('gpt'):
+        return OPENAI_MODEL_ORDER[-1]
+    elif model_lower.startswith('gemini'):
+        return GEMINI_MODEL_ORDER[-1]
+
+    return selected_model
+
+def strip_router_prefix(model: str) -> str:
+    """
+    Strip router prefix from model name.
+
+    Examples:
+    - "Abacus/gpt-4.1" -> "gpt-4.1"
+    - "LiteLLM/openai/gpt-4.1" -> "openai/gpt-4.1"
+    - "gpt-4.1" -> "gpt-4.1" (no prefix, return as-is)
+    """
+    if model.lower().startswith('abacus/'):
+        return model[7:]  # Strip "Abacus/"
+    elif model.lower().startswith('litellm/'):
+        return model[8:]  # Strip "LiteLLM/"
+    return model
+
+def get_underlying_model_provider(full_model_provider_id: str) -> Optional[str]:
+    """
+    Determine the underlying AI provider from a model identifier.
+
+    For Abacus models (Abacus/model), determine the provider from model name pattern.
+    For LiteLLM models (LiteLLM/provider/model), extract the provider from the prefix.
+
+    Returns:
+        Provider name ("openai", "anthropic", "google") or None if cannot determine.
+    """
+    # Strip router prefix first
+    model_without_router = strip_router_prefix(full_model_provider_id)
+
+    # Check if it's a LiteLLM format (provider/model)
+    if "/" in model_without_router:
+        provider, _ = model_without_router.split("/", 1)
+        return provider.lower()
+
+    # For Abacus models without provider prefix, determine from model name
+    model_lower = model_without_router.lower()
+    if model_lower.startswith('gpt'):
+        return 'openai'
+    elif model_lower.startswith('claude'):
+        return 'anthropic'
+    elif model_lower.startswith('gemini'):
+        return 'google'
+
+    return None
+
+def get_model_order_index(pair: List[str]) -> Optional[int]:
+    provider, model_name = pair
+    if provider == "openai":
+        try:
+            return OPENAI_MODEL_ORDER.index(model_name)
+        except ValueError:
+            return None
+    elif provider == "anthropic":
+        try:
+            return ANTHROPIC_MODEL_ORDER.index(model_name)
+        except ValueError:
+            return None
+    elif provider == "google":
+        try:
+            return GEMINI_MODEL_ORDER.index(model_name)
+        except ValueError:
+            return None
+    else:
+        return None
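Taken together, this new module (by its +257 line count, mito_ai/utils/model_utils.py in the file list above) orders models from fastest to smartest per provider and resolves router prefixes back to an underlying provider. A minimal usage sketch under that assumption, also assuming no Abacus or LiteLLM router is configured so get_available_models() falls back to STANDARD_MODELS:

# Minimal sketch, assuming the module ships as mito_ai.utils.model_utils and
# that no Abacus/LiteLLM router is configured.
from mito_ai.utils.model_utils import (
    get_available_models,
    get_fast_model_for_selected_model,
    get_smartest_model_for_selected_model,
    strip_router_prefix,
)

print(get_available_models())
# Expected in this configuration: the STANDARD_MODELS list

# For standard model names, the fast/smart variants come from the per-provider order lists:
# index 0 is always the fastest entry, index -1 the smartest.
print(get_fast_model_for_selected_model("claude-sonnet-4-5-20250929"))   # claude-haiku-4-5-20251001
print(get_smartest_model_for_selected_model("gpt-4.1"))                  # gpt-5.2

# Router prefixes are stripped before the underlying provider is resolved.
print(strip_router_prefix("LiteLLM/openai/gpt-4.1"))  # openai/gpt-4.1
print(strip_router_prefix("Abacus/gpt-4.1"))          # gpt-4.1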
mito_ai/utils/open_ai_utils.py
CHANGED
@@ -6,31 +6,18 @@
 
 # Copyright (c) Saga Inc.
 
-import
-import json
-import time
-from typing import Any, Dict, List, Optional, Final, Union, AsyncGenerator, Tuple, Callable
+from typing import Any, Dict, List, Optional, Union, AsyncGenerator, Tuple, Callable
 from mito_ai.utils.mito_server_utils import get_response_from_mito_server, stream_response_from_mito_server
-from mito_ai.utils.provider_utils import does_message_require_fast_model
-from tornado.httpclient import AsyncHTTPClient
 from openai.types.chat import ChatCompletionMessageParam
-
-from mito_ai.utils.utils import is_running_test
-from mito_ai.completions.models import MessageType, ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem
+from mito_ai.completions.models import MessageType, ResponseFormatInfo, CompletionReply, CompletionStreamChunk
 from mito_ai.utils.schema import UJ_STATIC_USER_ID, UJ_USER_EMAIL
 from mito_ai.utils.db import get_user_field
-from mito_ai.utils
-from mito_ai.utils.server_limits import check_mito_server_quota
-from mito_ai.utils.telemetry_utils import log_ai_completion_success
-from .utils import _create_http_client
+from mito_ai.enterprise.utils import is_abacus_configured
 from mito_ai.constants import MITO_OPENAI_URL
 
-
 __user_email: Optional[str] = None
 __user_id: Optional[str] = None
 
-FAST_OPENAI_MODEL = "gpt-4.1-nano"
-
 def _prepare_request_data_and_headers(
     last_message_content: Union[str, None],
     ai_completion_data: Dict[str, Any],
@@ -153,19 +140,12 @@ async def stream_ai_completion_from_mito_server(
 
 
 def get_open_ai_completion_function_params(
-    message_type: MessageType,
     model: str,
     messages: List[ChatCompletionMessageParam],
    stream: bool,
     response_format_info: Optional[ResponseFormatInfo] = None,
 ) -> Dict[str, Any]:
 
-    print("MESSAGE TYPE: ", message_type)
-    message_requires_fast_model = does_message_require_fast_model(message_type)
-    model = FAST_OPENAI_MODEL if message_requires_fast_model else model
-
-    print(f"model: {model}")
-
     completion_function_params = {
         "model": model,
         "stream": stream,
@@ -179,25 +159,33 @@ def get_open_ai_completion_function_params(
     # Pydantic models are supported by the OpenAI API, however, we need to be able to
     # serialize it for requests that are going to be sent to the mito server.
     # OpenAI expects a very specific schema as seen below.
+    # Note: Abacus only supports {"type": "json"} format, not the full JSON schema format.
     if response_format_info:
-        "json_schema
-        "
+        # Check if we're using Abacus - it only supports simple {"type": "json"} format
+        if is_abacus_configured() or model.lower().startswith('abacus/'):
+            completion_function_params["response_format"] = {
+                "type": "json"
+            }
+        else:
+            # For OpenAI and other providers, use the full JSON schema format
+            json_schema = response_format_info.format.schema()
+
+            # Add additionalProperties: False to the top-level schema
+            json_schema["additionalProperties"] = False
+
+            # Nested object definitions in $defs need to have additionalProperties set to False also
+            if "$defs" in json_schema:
+                for def_name, def_schema in json_schema["$defs"].items():
+                    if def_schema.get("type") == "object":
+                        def_schema["additionalProperties"] = False
+
+            completion_function_params["response_format"] = {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": f"{response_format_info.name}",
+                    "schema": json_schema,
+                    "strict": True
+                }
             }
-        }
 
     return completion_function_params
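The practical effect of the new branch is the shape of the response_format payload attached to the request. A small sketch of the two shapes, using a hypothetical ExampleFormat Pydantic model (not part of mito_ai) in place of response_format_info.format:

# Sketch of the two response_format payloads built above; ExampleFormat is a
# hypothetical Pydantic model used only for illustration.
from pydantic import BaseModel

class ExampleFormat(BaseModel):
    summary: str

# Abacus path: only the simple JSON format is supported.
abacus_response_format = {"type": "json"}

# OpenAI / other providers: the Pydantic schema is serialized, additionalProperties
# is disabled, and the result is sent as a strict json_schema.
json_schema = ExampleFormat.schema()
json_schema["additionalProperties"] = False
openai_response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "ExampleFormat",
        "schema": json_schema,
        "strict": True,
    },
}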
mito_ai/utils/provider_utils.py
CHANGED
@@ -8,13 +8,25 @@ from mito_ai.completions.models import MessageType
 
 def get_model_provider(model: str) -> Union[str, None]:
     """
-    Determine the model type based on the model name prefix
+    Determine the model type based on the model name prefix.
+
+    Priority order:
+    1. Check for router prefixes (Abacus/, LiteLLM/)
+    2. Check for legacy LiteLLM format (provider/model)
+    3. Check for standard model name patterns
     """
     if not model:
         return None
 
     model_lower = model.lower()
 
+    # Check for router prefixes first (highest priority)
+    if model_lower.startswith('abacus/'):
+        return 'abacus'
+    elif model_lower.startswith('litellm/'):
+        return 'litellm'
+
+    # Check for standard model name patterns
     if model_lower.startswith('claude'):
         return 'claude'
     elif model_lower.startswith('gemini'):
@@ -25,32 +37,4 @@ def get_model_provider(model: str) -> Union[str, None]:
         return 'openai'
 
     return None
-
-
-def does_message_require_fast_model(message_type: MessageType) -> bool:
-    """
-    Determines if a message requires the fast model.
-
-    The fast model is used for messages that are not chat messages.
-    For example, inline completions and chat name generation need to be fast
-    so they don't slow down the user's experience.
-    """
-
-    if message_type in (
-        MessageType.CHAT,
-        MessageType.SMART_DEBUG,
-        MessageType.CODE_EXPLAIN,
-        MessageType.AGENT_EXECUTION,
-        MessageType.AGENT_SCRATCHPAD_RESULT,
-        MessageType.AGENT_AUTO_ERROR_FIXUP,
-    ):
-        return False
-    elif message_type in (MessageType.INLINE_COMPLETION, MessageType.CHAT_NAME_GENERATION):
-        return True
-    elif message_type in (MessageType.START_NEW_CHAT, MessageType.FETCH_HISTORY, MessageType.GET_THREADS, MessageType.DELETE_THREAD, MessageType.UPDATE_MODEL_CONFIG):
-        # These messages don't use any model, but we add them here for type safety
-        return True
-    else:
-        raise ValueError(f"Invalid message type: {message_type}")
-
 
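A short sketch of how the updated get_model_provider resolves the new router prefixes ahead of the standard name patterns, assuming it is imported from mito_ai.utils.provider_utils:

# Sketch of the prefix resolution added above; router prefixes win over
# standard model-name patterns, and an empty model yields None.
from mito_ai.utils.provider_utils import get_model_provider

assert get_model_provider("Abacus/gpt-4.1") == "abacus"
assert get_model_provider("LiteLLM/openai/gpt-4.1") == "litellm"
assert get_model_provider("claude-haiku-4-5-20251001") == "claude"
assert get_model_provider("") is None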
mito_ai/utils/telemetry_utils.py
CHANGED
@@ -4,7 +4,7 @@
 import json
 import os
 from typing import Any, Dict, Literal, Optional, List
-from mito_ai.utils.version_utils import MITOSHEET_HELPER_PRIVATE, is_pro
+from mito_ai.utils.version_utils import MITOSHEET_HELPER_PRIVATE, is_pro, is_enterprise
 from mito_ai.utils.schema import UJ_AI_MITO_API_NUM_USAGES, UJ_MITOSHEET_TELEMETRY, UJ_STATIC_USER_ID, UJ_USER_EMAIL, UJ_FEEDBACKS_V2
 from mito_ai.utils.db import get_user_field
 from mito_ai._version import __version__
@@ -83,9 +83,13 @@ def telemetry_turned_on(key_type: Optional[str] = None) -> bool:
     Helper function that tells you if logging is turned on or
     turned off on the entire Mito instance
     """
+    # Enterprise mode disables all telemetry
+    if is_enterprise():
+        return False
+
     # If the user is on the Mito server, then they are sending
     # us their information already
-    if key_type ==
+    if key_type == MITO_SERVER_KEY:
         return True
 
     # If private helper is installed, then we don't log anything
@@ -109,6 +113,10 @@ def identify(key_type: Optional[str] = None, is_electron: Optional[bool] = None)
     Helper function for identifying a user. We just take
     their python version, mito version, and email.
     """
+    # Skip entirely if enterprise mode is enabled
+    if is_enterprise():
+        return
+
     if not telemetry_turned_on(key_type):
         return
 
@@ -208,6 +216,10 @@ def log(
         del final_params[param_name]
     final_params.update(params_to_add)
 
+    # Skip entirely if enterprise mode is enabled
+    if is_enterprise():
+        return
+
     # Finally, do the acutal logging. We do not log anything when tests are
     # running, or if telemetry is turned off
     if not is_running_test() and telemetry_turned_on(key_type):