mito-ai 0.1.58__py3-none-any.whl → 0.1.59__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. mito_ai/__init__.py +5 -2
  2. mito_ai/_version.py +1 -1
  3. mito_ai/completions/prompt_builders/prompt_constants.py +2 -0
  4. mito_ai/constants.py +25 -3
  5. mito_ai/enterprise/litellm_client.py +12 -5
  6. mito_ai/enterprise/utils.py +16 -2
  7. mito_ai/openai_client.py +26 -6
  8. mito_ai/provider_manager.py +34 -2
  9. mito_ai/tests/message_history/test_generate_short_chat_name.py +35 -4
  10. mito_ai/tests/open_ai_utils_test.py +34 -36
  11. mito_ai/tests/providers/test_azure.py +2 -2
  12. mito_ai/tests/test_constants.py +90 -0
  13. mito_ai/tests/test_enterprise_mode.py +55 -0
  14. mito_ai/tests/test_model_utils.py +116 -25
  15. mito_ai/utils/model_utils.py +130 -51
  16. mito_ai/utils/open_ai_utils.py +29 -33
  17. mito_ai/utils/provider_utils.py +13 -7
  18. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  19. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  20. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  21. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js +389 -70
  22. mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js.map +1 -0
  23. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js +3 -3
  24. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js.map +1 -1
  25. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +78 -78
  26. {mito_ai-0.1.58.dist-info → mito_ai-0.1.59.dist-info}/METADATA +1 -1
  27. {mito_ai-0.1.58.dist-info → mito_ai-0.1.59.dist-info}/RECORD +52 -52
  28. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +0 -1
  29. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  30. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  31. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  32. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  33. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  34. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  35. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  36. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  37. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  38. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  39. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  40. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  41. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  42. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  43. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  44. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  45. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  46. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  47. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  48. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  49. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  50. {mito_ai-0.1.58.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  51. {mito_ai-0.1.58.dist-info → mito_ai-0.1.59.dist-info}/WHEEL +0 -0
  52. {mito_ai-0.1.58.dist-info → mito_ai-0.1.59.dist-info}/entry_points.txt +0 -0
  53. {mito_ai-0.1.58.dist-info → mito_ai-0.1.59.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py CHANGED
@@ -9,6 +9,7 @@ from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.app_deploy.handlers import AppDeployHandler
 from mito_ai.log.urls import get_log_urls
 from mito_ai.utils.litellm_utils import is_litellm_configured
+from mito_ai.enterprise.utils import is_abacus_configured
 from mito_ai.version_check import VersionCheckHandler
 from mito_ai.db.urls import get_db_urls
 from mito_ai.settings.urls import get_settings_urls
@@ -101,10 +102,12 @@ def _load_jupyter_server_extension(server_app) -> None: # type: ignore
 
     web_app.add_handlers(host_pattern, handlers)
 
-    # Log enterprise mode status and LiteLLM configuration
+    # Log enterprise mode status and router configuration
    if is_enterprise():
        server_app.log.info("Enterprise mode enabled")
-        if is_litellm_configured():
+        if is_abacus_configured():
+            server_app.log.info(f"Abacus AI configured: endpoint={constants.ABACUS_BASE_URL}, models={constants.ABACUS_MODELS}")
+        elif is_litellm_configured():
            server_app.log.info(f"LiteLLM configured: endpoint={constants.LITELLM_BASE_URL}, models={constants.LITELLM_MODELS}")
 
    server_app.log.info("Loaded the mito_ai server extension")
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
 # This file is auto-generated by Hatchling. As such, do not:
 # - modify
 # - track in version control e.g. be sure to add to .gitignore
-__version__ = VERSION = '0.1.58'
+__version__ = VERSION = '0.1.59'
mito_ai/completions/prompt_builders/prompt_constants.py CHANGED
@@ -20,6 +20,8 @@ Rules:
 - All imports must appear at the top, before the chart configuration section.
 - Variables with multiple words should be underscore-separated.
 - All colors should be in hex format (e.g., "#3498db"). Use quotes around the hex string: COLOR = "#3498db" or COLOR = '#3498db'. Do NOT nest quotes.
+- Never use RGB/RGBA tuples/lists for colors (e.g. (0, 0.4, 0.8, 0.8) is forbidden).
+- If transparency is needed, store it separately as ALPHA = 0.8 and apply it in code (e.g. to_rgba(HEX_COLOR, ALPHA)).
 - Variables can only be strings, numbers, booleans, tuples, or lists.
 - NEVER include comments on the same line as a variable assignment. Each variable assignment must be on its own line with no trailing comments.
 - For string values, use either single or double quotes (e.g., TITLE = "Sales by Product" or TITLE = 'Sales by Product'). Do not use nested quotes (e.g., do NOT use '"value"').
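
For reference, these two new rules push generated chart configs toward a pattern like the following sketch (variable names illustrative; `to_rgba` is `matplotlib.colors.to_rgba`):

```python
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgba

# Chart configuration: hex color and transparency kept as separate scalars,
# one assignment per line, no trailing comments on assignment lines
BAR_COLOR = "#3498db"
ALPHA = 0.8

# Transparency is applied in code rather than baked into an RGBA tuple
fig, ax = plt.subplots()
ax.bar(["Q1", "Q2"], [10, 14], color=to_rgba(BAR_COLOR, ALPHA))
plt.show()
```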
mito_ai/constants.py CHANGED
@@ -2,7 +2,7 @@
 # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
 import os
-from typing import Union
+from typing import Union, List
 
 # Claude
 ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
@@ -23,12 +23,34 @@ AZURE_OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION")
 AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
 AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL")
 
+def parse_comma_separated_models(models_str: str) -> List[str]:
+    """
+    Parse a comma-separated string of model names into a list.
+    Handles quoted and unquoted values, stripping whitespace and quotes.
+
+    Args:
+        models_str: Comma-separated string of model names (e.g., "model1,model2" or '"model1","model2"')
+
+    Returns:
+        List of model names with whitespace and quotes stripped
+    """
+    if not models_str:
+        return []
+    return [model.strip().strip('"\'') for model in models_str.split(",") if model.strip()]
+
 # LiteLLM Config (Enterprise mode only)
 LITELLM_BASE_URL = os.environ.get("LITELLM_BASE_URL")
 LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
 LITELLM_MODELS_STR = os.environ.get("LITELLM_MODELS", "")
-# Parse comma-separated string into list, strip whitespace
-LITELLM_MODELS = [model.strip() for model in LITELLM_MODELS_STR.split(",") if model.strip()] if LITELLM_MODELS_STR else []
+# Parse comma-separated string into list, strip whitespace and quotes
+LITELLM_MODELS = parse_comma_separated_models(LITELLM_MODELS_STR)
+
+# Abacus AI Config (Enterprise mode only)
+ABACUS_BASE_URL = os.environ.get("ABACUS_BASE_URL")
+ABACUS_API_KEY = os.environ.get("ABACUS_API_KEY")
+ABACUS_MODELS_STR = os.environ.get("ABACUS_MODELS", "")
+# Parse comma-separated string into list, strip whitespace and quotes
+ABACUS_MODELS = parse_comma_separated_models(ABACUS_MODELS_STR)
 
 # Mito AI Base URLs and Endpoint Paths
 MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v2"
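
A quick illustration of the new parser's behavior, matching the cases the `TestParseCommaSeparatedModels` suite below exercises (values made up):

```python
from mito_ai.constants import parse_comma_separated_models

# Quotes and surrounding whitespace are stripped from each entry
assert parse_comma_separated_models(' "gpt-4o" , \'claude-3-5-sonnet\' ') == ["gpt-4o", "claude-3-5-sonnet"]

# Quoting the whole value also works, because only the outermost quote
# characters of each comma-split token are stripped
assert parse_comma_separated_models('"model1,model2"') == ["model1", "model2"]

# An empty or unset env var yields an empty model list
assert parse_comma_separated_models("") == []
```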
mito_ai/enterprise/litellm_client.py CHANGED
@@ -11,6 +11,7 @@ from mito_ai.completions.models import (
     CompletionItem,
 )
 from mito_ai.utils.litellm_utils import get_litellm_completion_function_params
+from mito_ai.utils.model_utils import strip_router_prefix
 import litellm
 
 class LiteLLMClient:
@@ -28,7 +29,7 @@ class LiteLLMClient:
     async def request_completions(
         self,
         messages: List[ChatCompletionMessageParam],
-        model: str, # Should include provider prefix (e.g., "openai/gpt-4o")
+        model: str, # Should include provider prefix (e.g., "LiteLLM/openai/gpt-4o")
         response_format_info: Optional[ResponseFormatInfo] = None,
         message_type: MessageType = MessageType.CHAT
     ) -> str:
@@ -37,16 +38,19 @@ class LiteLLMClient:
 
         Args:
             messages: List of chat messages
-            model: Model name with provider prefix (e.g., "openai/gpt-4o")
+            model: Model name with router and provider prefix (e.g., "LiteLLM/openai/gpt-4o")
             response_format_info: Optional response format specification
             message_type: Type of message (chat, agent execution, etc.)
 
         Returns:
             The completion text response
         """
+        # Strip router prefix if present (LiteLLM/ prefix)
+        model_for_litellm = strip_router_prefix(model)
+
         # Prepare parameters for LiteLLM
         params = get_litellm_completion_function_params(
-            model=model,
+            model=model_for_litellm,
             messages=messages,
             api_key=self.api_key,
             api_base=self.base_url,
@@ -82,7 +86,7 @@ class LiteLLMClient:
 
         Args:
             messages: List of chat messages
-            model: Model name with provider prefix (e.g., "openai/gpt-4o")
+            model: Model name with router and provider prefix (e.g., "LiteLLM/openai/gpt-4o")
             message_type: Type of message (chat, agent execution, etc.)
             message_id: ID of the message being processed
             reply_fn: Function to call with each chunk for streaming replies
@@ -93,9 +97,12 @@ class LiteLLMClient:
         """
         accumulated_response = ""
 
+        # Strip router prefix if present (LiteLLM/ prefix)
+        model_for_litellm = strip_router_prefix(model)
+
         # Prepare parameters for LiteLLM
         params = get_litellm_completion_function_params(
-            model=model,
+            model=model_for_litellm,
             messages=messages,
             api_key=self.api_key,
             api_base=self.base_url,
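
`strip_router_prefix` is imported from `mito_ai.utils.model_utils`, whose diff is not shown in this section. Judging from its call sites here and in `openai_client.py` (dropping a leading `LiteLLM/` or `Abacus/` segment case-insensitively while leaving any provider prefix such as `openai/` intact), a plausible sketch is:

```python
# Hypothetical reconstruction -- the real implementation lives in
# mito_ai/utils/model_utils.py and is not part of this diff.
ROUTER_PREFIXES = ("litellm/", "abacus/")

def strip_router_prefix(model: str) -> str:
    """Drop a leading router segment, keeping the provider prefix."""
    for prefix in ROUTER_PREFIXES:
        if model.lower().startswith(prefix):
            return model[len(prefix):]
    return model

assert strip_router_prefix("LiteLLM/openai/gpt-4o") == "openai/gpt-4o"
assert strip_router_prefix("Abacus/gpt-4.1") == "gpt-4.1"
assert strip_router_prefix("gpt-4.1") == "gpt-4.1"
```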
mito_ai/enterprise/utils.py CHANGED
@@ -5,11 +5,25 @@
 # Distributed under the terms of the The Mito Enterprise license.
 
 from mito_ai.utils.version_utils import is_enterprise, is_mitosheet_private
-from mito_ai.constants import AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL
+from mito_ai.constants import (
+    AZURE_OPENAI_API_KEY,
+    AZURE_OPENAI_ENDPOINT,
+    AZURE_OPENAI_API_VERSION,
+    AZURE_OPENAI_MODEL,
+    ABACUS_BASE_URL,
+    ABACUS_MODELS
+)
 
 def is_azure_openai_configured() -> bool:
     """
     Azure OpenAI is only supported for Mito Enterprise users
     """
     is_allowed_to_use_azure = is_enterprise() or is_mitosheet_private()
-    return all([is_allowed_to_use_azure, AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL])
+    return all([is_allowed_to_use_azure, AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL])
+
+def is_abacus_configured() -> bool:
+    """
+    Abacus AI is only supported for Mito Enterprise users.
+    Checks if Abacus AI is configured with base URL and models.
+    """
+    return all([is_enterprise(), ABACUS_BASE_URL, ABACUS_MODELS])
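
Note that `is_abacus_configured()` checks `ABACUS_BASE_URL` and `ABACUS_MODELS` but not `ABACUS_API_KEY`. Configuration comes entirely from the environment variables read in `constants.py`, so enabling the Abacus route looks roughly like this (values illustrative; they must be set before `mito_ai.constants` is imported, and enterprise mode must be active):

```python
import os

# Illustrative values for an OpenAI-compatible Abacus RouteLLM endpoint
os.environ["ABACUS_BASE_URL"] = "https://routellm.abacus.ai/v1"
os.environ["ABACUS_API_KEY"] = "sk-..."
os.environ["ABACUS_MODELS"] = "Abacus/gpt-4.1,Abacus/claude-haiku-4-5-20251001"

# With these set (and enterprise mode on), Abacus takes precedence over
# LiteLLM in the startup logging added to __init__.py above
from mito_ai.enterprise.utils import is_abacus_configured
print(is_abacus_configured())
```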
mito_ai/openai_client.py CHANGED
@@ -11,8 +11,9 @@ from traitlets import Instance, default, validate
 from traitlets.config import LoggingConfigurable
 
 from mito_ai import constants
-from mito_ai.enterprise.utils import is_azure_openai_configured
+from mito_ai.enterprise.utils import is_azure_openai_configured, is_abacus_configured
 from mito_ai.logger import get_logger
+from mito_ai.utils.model_utils import strip_router_prefix
 from mito_ai.completions.models import (
     AICapabilities,
     CompletionError,
@@ -24,12 +25,11 @@ from mito_ai.completions.models import (
     ResponseFormatInfo,
 )
 from mito_ai.utils.open_ai_utils import (
-    check_mito_server_quota,
     get_ai_completion_from_mito_server,
     get_open_ai_completion_function_params,
     stream_ai_completion_from_mito_server,
 )
-from mito_ai.utils.server_limits import update_mito_server_quota
+from mito_ai.utils.server_limits import update_mito_server_quota, check_mito_server_quota
 
 OPENAI_MODEL_FALLBACK = "gpt-4.1"
 
@@ -68,6 +68,14 @@ This attribute is observed by the websocket provider to push the error to the cl
             provider="Azure OpenAI",
         )
 
+        if is_abacus_configured():
+            return AICapabilities(
+                configuration={
+                    "model": "<dynamic>"
+                },
+                provider="Abacus AI",
+            )
+
         if constants.OLLAMA_MODEL:
             return AICapabilities(
                 configuration={
@@ -121,6 +129,10 @@ This attribute is observed by the websocket provider to push the error to the cl
                 timeout=self.timeout,
             )
 
+        elif is_abacus_configured():
+            base_url = constants.ABACUS_BASE_URL
+            llm_api_key = constants.ABACUS_API_KEY
+            self.log.debug(f"Using Abacus AI with base URL: {constants.ABACUS_BASE_URL}")
         elif constants.OLLAMA_MODEL:
             base_url = constants.OLLAMA_BASE_URL
             llm_api_key = "ollama"
@@ -141,17 +153,25 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
         return client
 
-    def _adjust_model_for_azure_or_ollama(self, model: str) -> str:
+    def _adjust_model_for_provider(self, model: str) -> str:
 
         # If they have set an Azure OpenAI model, then we always use it
         if is_azure_openai_configured() and constants.AZURE_OPENAI_MODEL is not None:
             self.log.debug(f"Resolving to Azure OpenAI model: {constants.AZURE_OPENAI_MODEL}")
+            # TODO: We should update Azure so it works the way LiteLLM and Abacus do:
+            # when configured, we only show models from Azure in the UI.
             return constants.AZURE_OPENAI_MODEL
 
         # If they have set an Ollama model, then we use it
         if constants.OLLAMA_MODEL is not None:
             return constants.OLLAMA_MODEL
 
+        # If using Abacus, strip the "Abacus/" prefix from the model name
+        if is_abacus_configured() and model.lower().startswith('abacus/'):
+            stripped_model = strip_router_prefix(model)
+            self.log.debug(f"Stripping Abacus prefix: {model} -> {stripped_model}")
+            return stripped_model
+
         # Otherwise, we use the model they provided
         return model
 
@@ -186,7 +206,7 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
 
         # If they have set an Azure OpenAI or Ollama model, then we use it
-        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
+        completion_function_params["model"] = self._adjust_model_for_provider(completion_function_params["model"])
 
         if self._active_async_client is not None:
             response = await self._active_async_client.chat.completions.create(**completion_function_params)
@@ -236,7 +256,7 @@ This attribute is observed by the websocket provider to push the error to the cl
             model, messages, True, response_format_info
         )
 
-        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
+        completion_function_params["model"] = self._adjust_model_for_provider(completion_function_params["model"])
 
         try:
             if self._active_async_client is not None:
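
The renamed `_adjust_model_for_provider` resolves models in a fixed order: a configured Azure model always wins, then a pinned Ollama model, then an `Abacus/` router prefix is stripped, and otherwise the requested model passes through. A self-contained restatement of that order (hypothetical; the real method lives on `OpenAIClient` and reads `mito_ai.constants`):

```python
# Hypothetical, standalone restatement of the resolution order above
def adjust_model_for_provider(model: str,
                              azure_model: str | None,
                              ollama_model: str | None,
                              abacus_configured: bool) -> str:
    if azure_model is not None:        # a configured Azure model always wins
        return azure_model
    if ollama_model is not None:       # then a pinned Ollama model
        return ollama_model
    if abacus_configured and model.lower().startswith("abacus/"):
        return model[len("abacus/"):]  # strip the "Abacus/" router prefix
    return model                       # otherwise pass through unchanged

assert adjust_model_for_provider("Abacus/gpt-4.1", None, None, True) == "gpt-4.1"
assert adjust_model_for_provider("gpt-3.5-turbo", "my-azure-deployment", None, False) == "my-azure-deployment"
```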
mito_ai/provider_manager.py CHANGED
@@ -27,6 +27,7 @@ from mito_ai.completions.models import (
     ResponseFormatInfo,
 )
 from mito_ai.utils.litellm_utils import is_litellm_configured
+from mito_ai.enterprise.utils import is_abacus_configured
 from mito_ai.utils.telemetry_utils import (
     MITO_SERVER_KEY,
     USER_KEY,
@@ -79,6 +80,12 @@ This attribute is observed by the websocket provider to push the error to the cl
         # TODO: We should validate that these keys are actually valid for the provider
         # otherwise it will look like we are using the user_key when actually falling back
         # to the mito server because the key is invalid.
+        if is_abacus_configured():
+            return AICapabilities(
+                configuration={"model": "<dynamic>"},
+                provider="Abacus AI",
+            )
+
         if is_litellm_configured():
             return AICapabilities(
                 configuration={"model": "<dynamic>"},
@@ -116,6 +123,9 @@ This attribute is observed by the websocket provider to push the error to the cl
         # TODO: We should validate that these keys are actually valid for the provider
         # otherwise it will look like we are using the user_key when actually falling back
         # to the mito server because the key is invalid.
+        if is_abacus_configured():
+            return USER_KEY
+
         if is_litellm_configured():
             return USER_KEY
 
@@ -172,7 +182,16 @@ This attribute is observed by the websocket provider to push the error to the cl
         # Retry loop
         for attempt in range(max_retries + 1):
             try:
-                if model_type == "litellm":
+                if model_type == "abacus":
+                    if not self._openai_client:
+                        raise RuntimeError("OpenAI client is not initialized.")
+                    completion = await self._openai_client.request_completions(
+                        message_type=message_type,
+                        messages=messages,
+                        model=resolved_model,
+                        response_format_info=response_format_info
+                    )
+                elif model_type == "litellm":
                     from mito_ai.enterprise.litellm_client import LiteLLMClient
                     if not constants.LITELLM_BASE_URL:
                         raise ValueError("LITELLM_BASE_URL is required for LiteLLM models")
@@ -299,7 +318,20 @@ This attribute is observed by the websocket provider to push the error to the cl
             ))
 
         try:
-            if model_type == "litellm":
+            if model_type == "abacus":
+                if not self._openai_client:
+                    raise RuntimeError("OpenAI client is not initialized.")
+                accumulated_response = await self._openai_client.stream_completions(
+                    message_type=message_type,
+                    messages=messages,
+                    model=resolved_model,
+                    message_id=message_id,
+                    thread_id=thread_id,
+                    reply_fn=reply_fn,
+                    user_input=user_input,
+                    response_format_info=response_format_info
+                )
+            elif model_type == "litellm":
                 from mito_ai.enterprise.litellm_client import LiteLLMClient
                 if not constants.LITELLM_BASE_URL:
                     raise ValueError("LITELLM_BASE_URL is required for LiteLLM models")
mito_ai/tests/message_history/test_generate_short_chat_name.py CHANGED
@@ -23,7 +23,8 @@ PROVIDER_TEST_CASES = [
     ("gpt-4.1", "mito_ai.provider_manager.OpenAIClient"),
     ("claude-sonnet-4-5-20250929", "mito_ai.provider_manager.AnthropicClient"),
     ("gemini-3-flash-preview", "mito_ai.provider_manager.GeminiClient"),
-    ("openai/gpt-4o", "mito_ai.provider_manager.LiteLLMClient"), # LiteLLM test case
+    ("litellm/openai/gpt-4o", "mito_ai.provider_manager.LiteLLMClient"), # LiteLLM test case
+    ("Abacus/gpt-4.1", "mito_ai.provider_manager.OpenAIClient"), # Abacus test case (uses OpenAIClient)
 ]
 
 @pytest.mark.parametrize("selected_model,client_patch_path", PROVIDER_TEST_CASES)
@@ -49,13 +50,27 @@ async def test_generate_short_chat_name_uses_correct_provider_and_fast_model(
         # Patch constants both at the source and where they're imported in model_utils
         monkeypatch.setattr("mito_ai.constants.LITELLM_BASE_URL", "https://litellm-server.com")
         monkeypatch.setattr("mito_ai.constants.LITELLM_API_KEY", "fake-litellm-key")
-        monkeypatch.setattr("mito_ai.constants.LITELLM_MODELS", ["openai/gpt-4o", "anthropic/claude-3-5-sonnet"])
+        monkeypatch.setattr("mito_ai.constants.LITELLM_MODELS", ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"])
         # Also patch where constants is imported in model_utils (where get_available_models uses it)
         monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_BASE_URL", "https://litellm-server.com")
-        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_MODELS", ["openai/gpt-4o", "anthropic/claude-3-5-sonnet"])
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.LITELLM_MODELS", ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"])
         # Mock is_enterprise to return True so LiteLLM models are available
         monkeypatch.setattr("mito_ai.utils.version_utils.is_enterprise", lambda: True)
 
+    # Set up Abacus constants if testing Abacus
+    if selected_model.startswith("Abacus/"):
+        # Patch constants both at the source and where they're imported in model_utils
+        monkeypatch.setattr("mito_ai.constants.ABACUS_BASE_URL", "https://routellm.abacus.ai/v1")
+        monkeypatch.setattr("mito_ai.constants.ABACUS_API_KEY", "fake-abacus-key")
+        monkeypatch.setattr("mito_ai.constants.ABACUS_MODELS", ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"])
+        # Also patch where constants is imported in model_utils (where get_available_models uses it)
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.ABACUS_BASE_URL", "https://routellm.abacus.ai/v1")
+        monkeypatch.setattr("mito_ai.utils.model_utils.constants.ABACUS_MODELS", ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"])
+        # Mock is_abacus_configured to return True so Abacus models are available
+        monkeypatch.setattr("mito_ai.utils.model_utils.is_abacus_configured", lambda: True)
+        # Mock is_enterprise to return True so enterprise models are available
+        monkeypatch.setattr("mito_ai.utils.version_utils.is_enterprise", lambda: True)
+
     # Create mock client for the specific provider being tested
     mock_client = MagicMock()
     mock_client.request_completions = AsyncMock(return_value="Test Chat Name")
@@ -87,12 +102,28 @@ async def test_generate_short_chat_name_uses_correct_provider_and_fast_model(
         # Patch LiteLLMClient where it's defined (it's imported inside request_completions)
         # Also patch get_available_models to return LiteLLM models
         with patch("mito_ai.enterprise.litellm_client.LiteLLMClient", return_value=mock_client), \
-             patch("mito_ai.provider_manager.get_available_models", return_value=["openai/gpt-4o", "anthropic/claude-3-5-sonnet"]):
+             patch("mito_ai.provider_manager.get_available_models", return_value=["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]):
+            result = await generate_short_chat_name(
+                user_message="What is the capital of France?",
+                assistant_message="The capital of France is Paris.",
+                llm_provider=llm_provider
+            )
+    elif selected_model.startswith("Abacus/"):
+        # For Abacus, it uses OpenAIClient, so patch the instance's method
+        # Also patch get_available_models to return Abacus models
+        assert llm_provider._openai_client is not None, "OpenAI client should be initialized for Abacus"
+        with patch.object(llm_provider._openai_client, 'request_completions', new_callable=AsyncMock, return_value="Test Chat Name") as mock_abacus_request, \
+             patch("mito_ai.provider_manager.get_available_models", return_value=["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"]):
             result = await generate_short_chat_name(
                 user_message="What is the capital of France?",
                 assistant_message="The capital of France is Paris.",
                 llm_provider=llm_provider
             )
+        # Verify that the OpenAI client's request_completions was called (Abacus uses OpenAIClient)
+        mock_abacus_request.assert_called_once() # type: ignore
+        # As a double check, if we have used the correct client, then we must get the correct result
+        assert result == "Test Chat Name"
+        return
     else: # OpenAI
         # For OpenAI, patch the instance's method since the client is created in __init__
         assert llm_provider._openai_client is not None, "OpenAI client should be initialized"
mito_ai/tests/open_ai_utils_test.py CHANGED
@@ -104,17 +104,16 @@ def test_prepare_request_data_and_headers_null_message() -> None:
     with patch("mito_ai.utils.open_ai_utils.get_user_field") as mock_get_user_field:
         mock_get_user_field.side_effect = ["test@example.com", "user123"]
 
-        with patch("mito_ai.utils.open_ai_utils.check_mito_server_quota"):
-            data, _ = _prepare_request_data_and_headers(
-                last_message_content=None,
-                ai_completion_data={},
-                timeout=30,
-                max_retries=3,
-                message_type=MessageType.CHAT
-            )
-
-            # Verify empty string is used for null message
-            assert data["user_input"] == ""
+        data, _ = _prepare_request_data_and_headers(
+            last_message_content=None,
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Verify empty string is used for null message
+        assert data["user_input"] == ""
 
 def test_prepare_request_data_and_headers_caches_user_info() -> None:
     """Test that user info is cached after first call"""
@@ -125,28 +124,27 @@ def test_prepare_request_data_and_headers_caches_user_info() -> None:
 
         mock_get_user_field.side_effect = ["test@example.com", "user123"]
 
-        with patch("mito_ai.utils.open_ai_utils.check_mito_server_quota"):
-            # First call
-            data1, _ = _prepare_request_data_and_headers(
-                last_message_content="test",
-                ai_completion_data={},
-                timeout=30,
-                max_retries=3,
-                message_type=MessageType.CHAT
-            )
-
-            # Second call
-            data2, _ = _prepare_request_data_and_headers(
-                last_message_content="test",
-                ai_completion_data={},
-                timeout=30,
-                max_retries=3,
-                message_type=MessageType.CHAT
-            )
-
-            # Verify get_user_field was only called twice (once for email, once for user_id)
-            assert mock_get_user_field.call_count == 2
-
-            # Verify both calls return same user info
-            assert data1["email"] == data2["email"] == "test@example.com"
-            assert data1["user_id"] == data2["user_id"] == "user123"
+        # First call
+        data1, _ = _prepare_request_data_and_headers(
+            last_message_content="test",
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Second call
+        data2, _ = _prepare_request_data_and_headers(
+            last_message_content="test",
+            ai_completion_data={},
+            timeout=30,
+            max_retries=3,
+            message_type=MessageType.CHAT
+        )
+
+        # Verify get_user_field was only called twice (once for email, once for user_id)
+        assert mock_get_user_field.call_count == 2
+
+        # Verify both calls return same user info
+        assert data1["email"] == data2["email"] == "test@example.com"
+        assert data1["user_id"] == data2["user_id"] == "user123"
mito_ai/tests/providers/test_azure.py CHANGED
@@ -176,11 +176,11 @@ class TestAzureOpenAIClientCreation:
         openai_client = OpenAIClient(config=provider_config)
 
         # Test with gpt-4.1 model
-        resolved_model = openai_client._adjust_model_for_azure_or_ollama("gpt-4.1")
+        resolved_model = openai_client._adjust_model_for_provider("gpt-4.1")
         assert resolved_model == FAKE_AZURE_MODEL
 
         # Test with any other model
-        resolved_model = openai_client._adjust_model_for_azure_or_ollama("gpt-3.5-turbo")
+        resolved_model = openai_client._adjust_model_for_provider("gpt-3.5-turbo")
         assert resolved_model == FAKE_AZURE_MODEL
 
 
mito_ai/tests/test_constants.py CHANGED
@@ -7,6 +7,7 @@ from mito_ai.constants import (
     ACTIVE_BASE_URL, MITO_PROD_BASE_URL, MITO_DEV_BASE_URL,
     MITO_STREAMLIT_DEV_BASE_URL, MITO_STREAMLIT_TEST_BASE_URL, ACTIVE_STREAMLIT_BASE_URL,
     COGNITO_CONFIG_DEV, ACTIVE_COGNITO_CONFIG,
+    parse_comma_separated_models,
 )
 
 
@@ -45,3 +46,92 @@ def test_cognito_config() -> Any:
 
     assert COGNITO_CONFIG_DEV == expected_config
     assert ACTIVE_COGNITO_CONFIG == COGNITO_CONFIG_DEV
+
+
+class TestParseCommaSeparatedModels:
+    """Tests for parse_comma_separated_models helper function."""
+
+    def test_parse_models_no_quotes(self) -> None:
+        """Test parsing models without quotes."""
+        models_str = "litellm/openai/gpt-4o,litellm/anthropic/claude-3-5-sonnet"
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+    def test_parse_models_double_quotes(self) -> None:
+        """Test parsing models with double quotes."""
+        # Entire string quoted
+        models_str = '"litellm/openai/gpt-4o,litellm/anthropic/claude-3-5-sonnet"'
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+        # Individual models quoted
+        models_str = '"litellm/openai/gpt-4o","litellm/anthropic/claude-3-5-sonnet"'
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+    def test_parse_models_single_quotes(self) -> None:
+        """Test parsing models with single quotes."""
+        # Entire string quoted
+        models_str = "'litellm/openai/gpt-4o,litellm/anthropic/claude-3-5-sonnet'"
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+        # Individual models quoted
+        models_str = "'litellm/openai/gpt-4o','litellm/anthropic/claude-3-5-sonnet'"
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+    def test_parse_models_mixed_quotes(self) -> None:
+        """Test parsing models where some have single quotes and some have double quotes."""
+        # Some models with single quotes, some with double quotes
+        models_str = "'litellm/openai/gpt-4o',\"litellm/anthropic/claude-3-5-sonnet\""
+        result = parse_comma_separated_models(models_str)
+        # Should strip both types of quotes
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+    def test_parse_models_with_whitespace(self) -> None:
+        """Test parsing models with whitespace around commas and model names."""
+        models_str = " litellm/openai/gpt-4o , litellm/anthropic/claude-3-5-sonnet "
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o", "litellm/anthropic/claude-3-5-sonnet"]
+
+    def test_parse_models_empty_string(self) -> None:
+        """Test parsing empty string."""
+        result = parse_comma_separated_models("")
+        assert result == []
+
+    def test_parse_models_single_model(self) -> None:
+        """Test parsing single model."""
+        models_str = "litellm/openai/gpt-4o"
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o"]
+
+        # With quotes
+        models_str = '"litellm/openai/gpt-4o"'
+        result = parse_comma_separated_models(models_str)
+        assert result == ["litellm/openai/gpt-4o"]
+
+    def test_parse_models_abacus_format(self) -> None:
+        """Test parsing Abacus model format."""
+        models_str = "Abacus/gpt-4.1,Abacus/claude-haiku-4-5-20251001"
+        result = parse_comma_separated_models(models_str)
+        assert result == ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"]
+
+        # With quotes
+        models_str = '"Abacus/gpt-4.1","Abacus/claude-haiku-4-5-20251001"'
+        result = parse_comma_separated_models(models_str)
+        assert result == ["Abacus/gpt-4.1", "Abacus/claude-haiku-4-5-20251001"]
+
+    @pytest.mark.parametrize("models_str,description", [
+        ('"model1,model2"', 'Double quotes, no space after comma'),
+        ("'model1,model2'", 'Single quotes, no space after comma'),
+        ("model1,model2", 'No quotes, no space after comma'),
+        ('"model1, model2"', 'Double quotes, space after comma'),
+        ("'model1, model2'", 'Single quotes, space after comma'),
+        ("model1, model2", 'No quotes, space after comma'),
+    ])
+    def test_parse_models_all_scenarios(self, models_str: str, description: str) -> None:
+        """Test all specific scenarios: quotes with and without spaces after commas."""
+        expected = ["model1", "model2"]
+        result = parse_comma_separated_models(models_str)
+        assert result == expected, f"Failed for {description}: {repr(models_str)}"