mito-ai 0.1.58__py3-none-any.whl → 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. mito_ai/__init__.py +5 -2
  2. mito_ai/_version.py +1 -1
  3. mito_ai/completions/prompt_builders/agent_system_message.py +7 -1
  4. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  5. mito_ai/completions/prompt_builders/prompt_constants.py +17 -0
  6. mito_ai/constants.py +25 -3
  7. mito_ai/enterprise/litellm_client.py +12 -5
  8. mito_ai/enterprise/utils.py +16 -2
  9. mito_ai/openai_client.py +26 -6
  10. mito_ai/provider_manager.py +34 -2
  11. mito_ai/rules/handlers.py +46 -12
  12. mito_ai/rules/utils.py +170 -6
  13. mito_ai/tests/message_history/test_generate_short_chat_name.py +35 -4
  14. mito_ai/tests/open_ai_utils_test.py +34 -36
  15. mito_ai/tests/providers/test_azure.py +2 -2
  16. mito_ai/tests/providers/test_providers.py +5 -5
  17. mito_ai/tests/rules/rules_test.py +100 -4
  18. mito_ai/tests/test_constants.py +90 -0
  19. mito_ai/tests/test_enterprise_mode.py +55 -0
  20. mito_ai/tests/test_model_utils.py +116 -25
  21. mito_ai/utils/anthropic_utils.py +1 -2
  22. mito_ai/utils/model_utils.py +130 -53
  23. mito_ai/utils/open_ai_utils.py +29 -33
  24. mito_ai/utils/provider_utils.py +13 -7
  25. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  26. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  27. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  28. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js → mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dccfa541c464ee0e5cd4.js +1064 -175
  29. mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dccfa541c464ee0e5cd4.js.map +1 -0
  30. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js → mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_css-loader_dist_cjs_js_style_base_css.3594c54c9d209e1ed56e.js +2 -460
  31. mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_css-loader_dist_cjs_js_style_base_css.3594c54c9d209e1ed56e.js.map +1 -0
  32. mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_sourceMaps_j-49e54d.3972dd8e7542bba478ad.js +463 -0
  33. mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_sourceMaps_j-49e54d.3972dd8e7542bba478ad.js.map +1 -0
  34. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js → mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9735d9bfc8891147fee0.js +6 -6
  35. mito_ai-0.1.60.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9735d9bfc8891147fee0.js.map +1 -0
  36. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +78 -78
  37. {mito_ai-0.1.58.dist-info → mito_ai-0.1.60.dist-info}/METADATA +1 -1
  38. {mito_ai-0.1.58.dist-info → mito_ai-0.1.60.dist-info}/RECORD +61 -59
  39. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +0 -1
  40. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +0 -1
  41. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -1
  42. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  43. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  44. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  45. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  46. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  47. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  48. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  49. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  50. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  51. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  52. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  53. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  54. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  55. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  56. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  57. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  58. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  59. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  60. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  61. {mito_ai-0.1.58.data → mito_ai-0.1.60.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  62. {mito_ai-0.1.58.dist-info → mito_ai-0.1.60.dist-info}/WHEEL +0 -0
  63. {mito_ai-0.1.58.dist-info → mito_ai-0.1.60.dist-info}/entry_points.txt +0 -0
  64. {mito_ai-0.1.58.dist-info → mito_ai-0.1.60.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py CHANGED
@@ -9,6 +9,7 @@ from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.app_deploy.handlers import AppDeployHandler
 from mito_ai.log.urls import get_log_urls
 from mito_ai.utils.litellm_utils import is_litellm_configured
+from mito_ai.enterprise.utils import is_abacus_configured
 from mito_ai.version_check import VersionCheckHandler
 from mito_ai.db.urls import get_db_urls
 from mito_ai.settings.urls import get_settings_urls
@@ -101,10 +102,12 @@ def _load_jupyter_server_extension(server_app) -> None: # type: ignore
 
     web_app.add_handlers(host_pattern, handlers)
 
-    # Log enterprise mode status and LiteLLM configuration
+    # Log enterprise mode status and router configuration
    if is_enterprise():
         server_app.log.info("Enterprise mode enabled")
-        if is_litellm_configured():
+        if is_abacus_configured():
+            server_app.log.info(f"Abacus AI configured: endpoint={constants.ABACUS_BASE_URL}, models={constants.ABACUS_MODELS}")
+        elif is_litellm_configured():
             server_app.log.info(f"LiteLLM configured: endpoint={constants.LITELLM_BASE_URL}, models={constants.LITELLM_MODELS}")
 
     server_app.log.info("Loaded the mito_ai server extension")
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
 # This file is auto-generated by Hatchling. As such, do not:
 # - modify
 # - track in version control e.g. be sure to add to .gitignore
-__version__ = VERSION = '0.1.58'
+__version__ = VERSION = '0.1.60'
mito_ai/completions/prompt_builders/agent_system_message.py CHANGED
@@ -10,6 +10,7 @@ from mito_ai.completions.prompt_builders.prompt_constants import (
     get_database_rules
 )
 from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
+from mito_ai.rules.utils import get_default_rules_content
 
 def create_agent_system_message_prompt(isChromeBrowser: bool) -> str:
 
@@ -474,7 +475,12 @@ Important information:
 
     # Database rules
     sections.append(SG.Generic("Database Rules", get_database_rules()))
-
+
+    # Default rules
+    default_rules = get_default_rules_content()
+    if default_rules:
+        sections.append(SG.Generic("Default (User Defined) Rules", default_rules))
+
     # RULES OF YOUR WORKING PROCESS
     sections.append(SG.Generic("Rules Of Working Process", f"""The user is going to ask you to guide them as through the process of completing a task. You will help them complete a task over the course of an entire conversation with them. The user will first share with you what they want to accomplish. You will then use a tool to execute the first step of the task, they will execute the tool and return to you the updated notebook state with you, and then you will give them the next step of the task. You will continue to give them the next step of the task until they have completed the task.
 
mito_ai/completions/prompt_builders/chat_system_message.py CHANGED
@@ -11,6 +11,7 @@ from mito_ai.completions.prompt_builders.prompt_constants import (
     get_database_rules
 )
 from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
+from mito_ai.rules.utils import get_default_rules_content
 
 def create_chat_system_message_prompt() -> str:
     sections: List[PromptSection] = []
@@ -34,6 +35,9 @@ Other useful information:
 
     sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
     sections.append(SG.Generic("DatabaseRules", get_database_rules()))
+    default_rules = get_default_rules_content()
+    if default_rules:
+        sections.append(SG.Generic("Default (User Defined) Rules", default_rules))
     sections.append(SG.Generic("Citation Rules", CITATION_RULES))
     sections.append(SG.Generic("Cell Reference Rules", CELL_REFERENCE_RULES))
 
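Both prompt builders now append a "Default (User Defined) Rules" section via get_default_rules_content. Its implementation lives in mito_ai/rules/utils.py (+170 -6), which this diff does not show, so the following Python sketch is only a plausible reconstruction built from the helpers that rules/handlers.py imports further down; treat the concatenation logic as an assumption:

from mito_ai.rules.utils import get_all_rules, get_rule, get_rule_default  # helpers confirmed by the handlers diff below

def get_default_rules_content_sketch() -> str:
    """Hypothetical shape of get_default_rules_content: join the content of
    every rule whose metadata marks it as a default rule."""
    parts = []
    for name in get_all_rules():        # every rule file on disk
        if get_rule_default(name):      # the new is_default metadata flag
            content = get_rule(name)
            if content:
                parts.append(content)
    return "\n\n".join(parts)           # empty string when no defaults exist, so
                                        # the `if default_rules:` guard skips the section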
mito_ai/completions/prompt_builders/prompt_constants.py CHANGED
@@ -20,10 +20,23 @@ Rules:
 - All imports must appear at the top, before the chart configuration section.
 - Variables with multiple words should be underscore-separated.
 - All colors should be in hex format (e.g., "#3498db"). Use quotes around the hex string: COLOR = "#3498db" or COLOR = '#3498db'. Do NOT nest quotes.
+- Never use RGB/RGBA tuples/lists for colors (e.g. (0, 0.4, 0.8, 0.8) is forbidden).
+- If transparency is needed, store it separately as ALPHA = 0.8 and apply it in code (e.g. to_rgba(HEX_COLOR, ALPHA)).
 - Variables can only be strings, numbers, booleans, tuples, or lists.
 - NEVER include comments on the same line as a variable assignment. Each variable assignment must be on its own line with no trailing comments.
 - For string values, use either single or double quotes (e.g., TITLE = "Sales by Product" or TITLE = 'Sales by Product'). Do not use nested quotes (e.g., do NOT use '"value"').
 
+Fixed acceptable ranges (matplotlib constraints):
+- For numeric variables that have a fixed acceptable range, add a line immediately BEFORE the variable assignment: # RANGE VARIABLE_NAME MIN MAX
+- This allows the Chart Wizard to clamp inputs and prevent invalid values. Use the following ranges when you use these variables:
+  - ALPHA (opacity): 0 1
+  - FIGURE_SIZE (tuple width, height in inches): 1 24 (each element)
+  - LINE_WIDTH, LINEWIDTH, LWD: 0 20
+  - FONT_SIZE, FONTSIZE, FONT_SIZE_TITLE, FONT_SIZE_LABEL: 0.1 72
+  - MARKER_SIZE, MARKERSIZE, S: 0 1000
+  - DPI: 1 600
+- Any other numeric or tuple variable that you know has matplotlib constraints: add # RANGE VARIABLE_NAME MIN MAX with the appropriate min and max.
+
 Common Mistakes to Avoid:
 - WRONG: COLOR = '"#1877F2" # Meta Blue' (nested quotes and inline comment)
 - WRONG: COLOR = "#1877F2" # Meta Blue (inline comment)
@@ -36,6 +49,10 @@ TITLE = "Sales by Product"
 X_LABEL = "Product"
 Y_LABEL = "Sales"
 BAR_COLOR = "#000000"
+# RANGE ALPHA 0 1
+ALPHA = 0.8
+# RANGE FIGURE_SIZE 1 24
+FIGURE_SIZE = (12, 6)
 # === END CONFIG ===
 """
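The `# RANGE VARIABLE_NAME MIN MAX` lines form a comment-level protocol: the model emits validation metadata next to each constrained variable so the Chart Wizard can clamp user input. The wizard itself lives in the frontend bundle, not in this diff, so here is a minimal Python sketch of how such directives could be parsed and applied (the names and structure are assumptions):

import re
from typing import Dict, Tuple

# Matches directive lines such as: # RANGE ALPHA 0 1
RANGE_RE = re.compile(r"^#\s*RANGE\s+(\w+)\s+(-?[\d.]+)\s+(-?[\d.]+)\s*$")

def parse_ranges(config_source: str) -> Dict[str, Tuple[float, float]]:
    """Collect RANGE directives from a chart config block."""
    ranges: Dict[str, Tuple[float, float]] = {}
    for line in config_source.splitlines():
        match = RANGE_RE.match(line.strip())
        if match:
            name, lo, hi = match.groups()
            ranges[name] = (float(lo), float(hi))
    return ranges

def clamp(value: float, bounds: Tuple[float, float]) -> float:
    """Force a value into [lo, hi], preventing invalid matplotlib inputs."""
    lo, hi = bounds
    return max(lo, min(hi, value))

ranges = parse_ranges("# RANGE ALPHA 0 1\nALPHA = 0.8")
print(clamp(1.7, ranges["ALPHA"]))  # 1.0 -- an out-of-range edit gets clamped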
mito_ai/constants.py CHANGED
@@ -2,7 +2,7 @@
 # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
 import os
-from typing import Union
+from typing import Union, List
 
 # Claude
 ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
@@ -23,12 +23,34 @@ AZURE_OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION")
 AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
 AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL")
 
+def parse_comma_separated_models(models_str: str) -> List[str]:
+    """
+    Parse a comma-separated string of model names into a list.
+    Handles quoted and unquoted values, stripping whitespace and quotes.
+
+    Args:
+        models_str: Comma-separated string of model names (e.g., "model1,model2" or '"model1","model2"')
+
+    Returns:
+        List of model names with whitespace and quotes stripped
+    """
+    if not models_str:
+        return []
+    return [model.strip().strip('"\'') for model in models_str.split(",") if model.strip()]
+
 # LiteLLM Config (Enterprise mode only)
 LITELLM_BASE_URL = os.environ.get("LITELLM_BASE_URL")
 LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
 LITELLM_MODELS_STR = os.environ.get("LITELLM_MODELS", "")
-# Parse comma-separated string into list, strip whitespace
-LITELLM_MODELS = [model.strip() for model in LITELLM_MODELS_STR.split(",") if model.strip()] if LITELLM_MODELS_STR else []
+# Parse comma-separated string into list, strip whitespace and quotes
+LITELLM_MODELS = parse_comma_separated_models(LITELLM_MODELS_STR)
+
+# Abacus AI Config (Enterprise mode only)
+ABACUS_BASE_URL = os.environ.get("ABACUS_BASE_URL")
+ABACUS_API_KEY = os.environ.get("ABACUS_API_KEY")
+ABACUS_MODELS_STR = os.environ.get("ABACUS_MODELS", "")
+# Parse comma-separated string into list, strip whitespace and quotes
+ABACUS_MODELS = parse_comma_separated_models(ABACUS_MODELS_STR)
 
 # Mito AI Base URLs and Endpoint Paths
 MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v2"
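parse_comma_separated_models is a pure function, so its behavior follows directly from the definition above. A few illustrative calls (the model names are placeholders):

from mito_ai.constants import parse_comma_separated_models

parse_comma_separated_models("gpt-4o, claude-sonnet-4")
# -> ['gpt-4o', 'claude-sonnet-4']   whitespace stripped
parse_comma_separated_models('"gpt-4o","claude-sonnet-4"')
# -> ['gpt-4o', 'claude-sonnet-4']   surrounding quotes stripped
parse_comma_separated_models("gpt-4o,,  ")
# -> ['gpt-4o']                      empty segments dropped
parse_comma_separated_models("")
# -> []                              unset env var yields an empty list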
mito_ai/enterprise/litellm_client.py CHANGED
@@ -11,6 +11,7 @@ from mito_ai.completions.models import (
     CompletionItem,
 )
 from mito_ai.utils.litellm_utils import get_litellm_completion_function_params
+from mito_ai.utils.model_utils import strip_router_prefix
 import litellm
 
 class LiteLLMClient:
@@ -28,7 +29,7 @@ class LiteLLMClient:
     async def request_completions(
         self,
         messages: List[ChatCompletionMessageParam],
-        model: str, # Should include provider prefix (e.g., "openai/gpt-4o")
+        model: str, # Should include provider prefix (e.g., "LiteLLM/openai/gpt-4o")
         response_format_info: Optional[ResponseFormatInfo] = None,
         message_type: MessageType = MessageType.CHAT
     ) -> str:
@@ -37,16 +38,19 @@ class LiteLLMClient:
 
         Args:
             messages: List of chat messages
-            model: Model name with provider prefix (e.g., "openai/gpt-4o")
+            model: Model name with router and provider prefix (e.g., "LiteLLM/openai/gpt-4o")
             response_format_info: Optional response format specification
             message_type: Type of message (chat, agent execution, etc.)
 
         Returns:
             The completion text response
         """
+        # Strip router prefix if present (LiteLLM/ prefix)
+        model_for_litellm = strip_router_prefix(model)
+
         # Prepare parameters for LiteLLM
         params = get_litellm_completion_function_params(
-            model=model,
+            model=model_for_litellm,
             messages=messages,
             api_key=self.api_key,
             api_base=self.base_url,
@@ -82,7 +86,7 @@ class LiteLLMClient:
 
         Args:
             messages: List of chat messages
-            model: Model name with provider prefix (e.g., "openai/gpt-4o")
+            model: Model name with router and provider prefix (e.g., "LiteLLM/openai/gpt-4o")
             message_type: Type of message (chat, agent execution, etc.)
             message_id: ID of the message being processed
             reply_fn: Function to call with each chunk for streaming replies
@@ -93,9 +97,12 @@ class LiteLLMClient:
         """
         accumulated_response = ""
 
+        # Strip router prefix if present (LiteLLM/ prefix)
+        model_for_litellm = strip_router_prefix(model)
+
         # Prepare parameters for LiteLLM
         params = get_litellm_completion_function_params(
-            model=model,
+            model=model_for_litellm,
            messages=messages,
             api_key=self.api_key,
             api_base=self.base_url,
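strip_router_prefix comes from mito_ai/utils/model_utils.py, which changed substantially (+130 -53) but is not included in this diff. From its call sites — it turns "LiteLLM/openai/gpt-4o" into a name litellm accepts, and openai_client.py reuses it for the "Abacus/" prefix after a case-insensitive check — a plausible sketch is:

# Assumed behavior only: the real implementation is in model_utils.py, which
# this diff does not show. The prefix set is inferred from the call sites.
ROUTER_PREFIXES = ("litellm/", "abacus/")

def strip_router_prefix(model: str) -> str:
    """Drop a leading router prefix (case-insensitive), keeping any
    provider prefix such as "openai/" intact."""
    lowered = model.lower()
    for prefix in ROUTER_PREFIXES:
        if lowered.startswith(prefix):
            return model[len(prefix):]
    return model

assert strip_router_prefix("LiteLLM/openai/gpt-4o") == "openai/gpt-4o"
assert strip_router_prefix("Abacus/gpt-4o") == "gpt-4o"
assert strip_router_prefix("gpt-4o") == "gpt-4o"  # no router prefix: unchanged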
mito_ai/enterprise/utils.py CHANGED
@@ -5,11 +5,25 @@
 # Distributed under the terms of the The Mito Enterprise license.
 
 from mito_ai.utils.version_utils import is_enterprise, is_mitosheet_private
-from mito_ai.constants import AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL
+from mito_ai.constants import (
+    AZURE_OPENAI_API_KEY,
+    AZURE_OPENAI_ENDPOINT,
+    AZURE_OPENAI_API_VERSION,
+    AZURE_OPENAI_MODEL,
+    ABACUS_BASE_URL,
+    ABACUS_MODELS
+)
 
 def is_azure_openai_configured() -> bool:
     """
     Azure OpenAI is only supported for Mito Enterprise users
     """
     is_allowed_to_use_azure = is_enterprise() or is_mitosheet_private()
-    return all([is_allowed_to_use_azure, AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL])
+    return all([is_allowed_to_use_azure, AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_MODEL])
+
+def is_abacus_configured() -> bool:
+    """
+    Abacus AI is only supported for Mito Enterprise users.
+    Checks if Abacus AI is configured with base URL and models.
+    """
+    return all([is_enterprise(), ABACUS_BASE_URL, ABACUS_MODELS])
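With is_abacus_configured in place, the Abacus AI route is enabled purely through environment variables. A configuration sketch (the URL and model names are placeholders; the variables must be exported before the Jupyter server starts, because mito_ai/constants.py reads os.environ at import time):

# In the shell or service environment, before launching Jupyter:
#   export ABACUS_BASE_URL="https://example.abacus.ai/v1"   # placeholder URL
#   export ABACUS_API_KEY="<your-key>"
#   export ABACUS_MODELS="gpt-4o,claude-sonnet-4"           # placeholder names

# Then, inside the server process:
from mito_ai import constants
from mito_ai.enterprise.utils import is_abacus_configured

if is_abacus_configured():  # True only in enterprise mode with URL and models set
    print(constants.ABACUS_BASE_URL, constants.ABACUS_MODELS)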
mito_ai/openai_client.py CHANGED
@@ -11,8 +11,9 @@ from traitlets import Instance, default, validate
 from traitlets.config import LoggingConfigurable
 
 from mito_ai import constants
-from mito_ai.enterprise.utils import is_azure_openai_configured
+from mito_ai.enterprise.utils import is_azure_openai_configured, is_abacus_configured
 from mito_ai.logger import get_logger
+from mito_ai.utils.model_utils import strip_router_prefix
 from mito_ai.completions.models import (
     AICapabilities,
     CompletionError,
@@ -24,12 +25,11 @@ from mito_ai.completions.models import (
     ResponseFormatInfo,
 )
 from mito_ai.utils.open_ai_utils import (
-    check_mito_server_quota,
     get_ai_completion_from_mito_server,
     get_open_ai_completion_function_params,
     stream_ai_completion_from_mito_server,
 )
-from mito_ai.utils.server_limits import update_mito_server_quota
+from mito_ai.utils.server_limits import update_mito_server_quota, check_mito_server_quota
 
 OPENAI_MODEL_FALLBACK = "gpt-4.1"
 
@@ -68,6 +68,14 @@ This attribute is observed by the websocket provider to push the error to the cl
                 provider="Azure OpenAI",
             )
 
+        if is_abacus_configured():
+            return AICapabilities(
+                configuration={
+                    "model": "<dynamic>"
+                },
+                provider="Abacus AI",
+            )
+
         if constants.OLLAMA_MODEL:
             return AICapabilities(
                 configuration={
@@ -121,6 +129,10 @@ This attribute is observed by the websocket provider to push the error to the cl
                 timeout=self.timeout,
             )
 
+        elif is_abacus_configured():
+            base_url = constants.ABACUS_BASE_URL
+            llm_api_key = constants.ABACUS_API_KEY
+            self.log.debug(f"Using Abacus AI with base URL: {constants.ABACUS_BASE_URL}")
         elif constants.OLLAMA_MODEL:
             base_url = constants.OLLAMA_BASE_URL
             llm_api_key = "ollama"
@@ -141,17 +153,25 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
         return client
 
-    def _adjust_model_for_azure_or_ollama(self, model: str) -> str:
+    def _adjust_model_for_provider(self, model: str) -> str:
 
         # If they have set an Azure OpenAI model, then we always use it
         if is_azure_openai_configured() and constants.AZURE_OPENAI_MODEL is not None:
             self.log.debug(f"Resolving to Azure OpenAI model: {constants.AZURE_OPENAI_MODEL}")
+            # TODO: We should update Azure so it works the way LiteLLM and Abacus do:
+            # when configured, we only show models from Azure in the UI.
             return constants.AZURE_OPENAI_MODEL
 
         # If they have set an Ollama model, then we use it
         if constants.OLLAMA_MODEL is not None:
             return constants.OLLAMA_MODEL
 
+        # If using Abacus, strip the "Abacus/" prefix from the model name
+        if is_abacus_configured() and model.lower().startswith('abacus/'):
+            stripped_model = strip_router_prefix(model)
+            self.log.debug(f"Stripping Abacus prefix: {model} -> {stripped_model}")
+            return stripped_model
+
         # Otherwise, we use the model they provided
         return model
 
@@ -186,7 +206,7 @@ This attribute is observed by the websocket provider to push the error to the cl
         )
 
         # If they have set an Azure OpenAI or Ollama model, then we use it
-        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
+        completion_function_params["model"] = self._adjust_model_for_provider(completion_function_params["model"])
 
         if self._active_async_client is not None:
             response = await self._active_async_client.chat.completions.create(**completion_function_params)
@@ -236,7 +256,7 @@ This attribute is observed by the websocket provider to push the error to the cl
             model, messages, True, response_format_info
         )
 
-        completion_function_params["model"] = self._adjust_model_for_azure_or_ollama(completion_function_params["model"])
+        completion_function_params["model"] = self._adjust_model_for_provider(completion_function_params["model"])
 
         try:
             if self._active_async_client is not None:
mito_ai/provider_manager.py CHANGED
@@ -27,6 +27,7 @@ from mito_ai.completions.models import (
     ResponseFormatInfo,
 )
 from mito_ai.utils.litellm_utils import is_litellm_configured
+from mito_ai.enterprise.utils import is_abacus_configured
 from mito_ai.utils.telemetry_utils import (
     MITO_SERVER_KEY,
     USER_KEY,
@@ -79,6 +80,12 @@ This attribute is observed by the websocket provider to push the error to the cl
         # TODO: We should validate that these keys are actually valid for the provider
         # otherwise it will look like we are using the user_key when actually falling back
         # to the mito server because the key is invalid.
+        if is_abacus_configured():
+            return AICapabilities(
+                configuration={"model": "<dynamic>"},
+                provider="Abacus AI",
+            )
+
         if is_litellm_configured():
             return AICapabilities(
                 configuration={"model": "<dynamic>"},
@@ -116,6 +123,9 @@ This attribute is observed by the websocket provider to push the error to the cl
         # TODO: We should validate that these keys are actually valid for the provider
         # otherwise it will look like we are using the user_key when actually falling back
         # to the mito server because the key is invalid.
+        if is_abacus_configured():
+            return USER_KEY
+
         if is_litellm_configured():
             return USER_KEY
 
@@ -172,7 +182,16 @@ This attribute is observed by the websocket provider to push the error to the cl
         # Retry loop
         for attempt in range(max_retries + 1):
             try:
-                if model_type == "litellm":
+                if model_type == "abacus":
+                    if not self._openai_client:
+                        raise RuntimeError("OpenAI client is not initialized.")
+                    completion = await self._openai_client.request_completions(
+                        message_type=message_type,
+                        messages=messages,
+                        model=resolved_model,
+                        response_format_info=response_format_info
+                    )
+                elif model_type == "litellm":
                     from mito_ai.enterprise.litellm_client import LiteLLMClient
                     if not constants.LITELLM_BASE_URL:
                         raise ValueError("LITELLM_BASE_URL is required for LiteLLM models")
@@ -299,7 +318,20 @@ This attribute is observed by the websocket provider to push the error to the cl
             ))
 
         try:
-            if model_type == "litellm":
+            if model_type == "abacus":
+                if not self._openai_client:
+                    raise RuntimeError("OpenAI client is not initialized.")
+                accumulated_response = await self._openai_client.stream_completions(
+                    message_type=message_type,
+                    messages=messages,
+                    model=resolved_model,
+                    message_id=message_id,
+                    thread_id=thread_id,
+                    reply_fn=reply_fn,
+                    user_input=user_input,
+                    response_format_info=response_format_info
+                )
+            elif model_type == "litellm":
                 from mito_ai.enterprise.litellm_client import LiteLLMClient
                 if not constants.LITELLM_BASE_URL:
                     raise ValueError("LITELLM_BASE_URL is required for LiteLLM models")
mito_ai/rules/handlers.py CHANGED
@@ -7,7 +7,16 @@ from typing import Any, Final, Union
 import tornado
 import os
 from jupyter_server.base.handlers import APIHandler
-from mito_ai.rules.utils import RULES_DIR_PATH, get_all_rules, get_rule, set_rules_file
+from mito_ai.rules.utils import (
+    RULES_DIR_PATH,
+    cleanup_rules_metadata,
+    delete_rule,
+    get_all_rules,
+    get_rule,
+    get_rule_default,
+    set_rule_default,
+    set_rules_file,
+)
 
 
 class RulesHandler(APIHandler):
@@ -17,17 +26,26 @@ class RulesHandler(APIHandler):
     def get(self, key: Union[str, None] = None) -> None:
         """Get a specific rule by key or all rules if no key provided"""
         if key is None or key == '':
-            # No key provided, return all rules
-            rules = get_all_rules()
+            # No key provided, return all rules with is_default flag
+            rule_files = get_all_rules()
+            rules = [
+                {"name": name, "is_default": get_rule_default(name)}
+                for name in rule_files
+            ]
             self.finish(json.dumps(rules))
         else:
             # Key provided, return specific rule
-            rule_content = get_rule(key)
-            if rule_content is None:
-                self.set_status(404)
-                self.finish(json.dumps({"error": f"Rule with key '{key}' not found"}))
-            else:
-                self.finish(json.dumps({"key": key, "content": rule_content}))
+            try:
+                rule_content = get_rule(key)
+                if rule_content is None:
+                    self.set_status(404)
+                    self.finish(json.dumps({"error": f"Rule with key '{key}' not found"}))
+                else:
+                    is_default = get_rule_default(key)
+                    self.finish(json.dumps({"key": key, "content": rule_content, "is_default": is_default}))
+            except ValueError as e:
+                self.set_status(400)
+                self.finish(json.dumps({"error": str(e)}))
 
     @tornado.web.authenticated
     def put(self, key: str) -> None:
@@ -37,8 +55,24 @@ class RulesHandler(APIHandler):
             self.set_status(400)
             self.finish(json.dumps({"error": "Content is required"}))
             return
-
-        set_rules_file(key, data['content'])
-        self.finish(json.dumps({"status": "updated", "rules file ": key}))
 
+        try:
+            set_rules_file(key, data['content'])
+            if 'is_default' in data:
+                set_rule_default(key, bool(data['is_default']))
+            cleanup_rules_metadata()
+            self.finish(json.dumps({"status": "updated", "rules_file": key}))
+        except ValueError as e:
+            self.set_status(400)
+            self.finish(json.dumps({"error": str(e)}))
 
+    @tornado.web.authenticated
+    def delete(self, key: str) -> None:
+        """Delete a rule by key (rule name)."""
+        try:
+            delete_rule(key)
+            cleanup_rules_metadata()
+            self.finish(json.dumps({"status": "deleted", "key": key}))
+        except ValueError as e:
+            self.set_status(400)
+            self.finish(json.dumps({"error": str(e)}))
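RulesHandler now covers the full rule lifecycle: GET lists every rule with its is_default flag, PUT upserts content and optionally toggles the flag, and the new DELETE removes a rule, with cleanup_rules_metadata running after each mutation. A usage sketch against a local Jupyter server follows; the route path and port are assumptions, since the handler's URL registration is not part of this diff, while the payload shapes follow the handler code above:

import requests

BASE = "http://localhost:8888/mito-ai/rules"  # assumed route; check your server's registered handlers
HEADERS = {"Authorization": "token <your-jupyter-token>"}  # endpoints are @tornado.web.authenticated

# Upsert a rule and mark it as a default (injected into every system prompt)
requests.put(f"{BASE}/style-guide", headers=HEADERS,
             json={"content": "Prefer vectorized pandas over loops.", "is_default": True})

# List all rules -> [{"name": "style-guide", "is_default": true}, ...]
print(requests.get(BASE, headers=HEADERS).json())

# Fetch one rule -> {"key": "style-guide", "content": "...", "is_default": true}
print(requests.get(f"{BASE}/style-guide", headers=HEADERS).json())

# Delete -> {"status": "deleted", "key": "style-guide"}
requests.delete(f"{BASE}/style-guide", headers=HEADERS)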