mito-ai 0.1.57__py3-none-any.whl → 0.1.59__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. mito_ai/__init__.py +19 -22
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +24 -14
  4. mito_ai/chart_wizard/handlers.py +78 -17
  5. mito_ai/chart_wizard/urls.py +8 -5
  6. mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
  7. mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
  8. mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
  9. mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
  10. mito_ai/completions/completion_handlers/completion_handler.py +3 -5
  11. mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
  12. mito_ai/completions/completion_handlers/scratchpad_result_handler.py +6 -8
  13. mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
  14. mito_ai/completions/completion_handlers/utils.py +3 -7
  15. mito_ai/completions/handlers.py +32 -22
  16. mito_ai/completions/message_history.py +8 -10
  17. mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
  18. mito_ai/completions/prompt_builders/prompt_constants.py +2 -0
  19. mito_ai/constants.py +31 -2
  20. mito_ai/enterprise/__init__.py +1 -1
  21. mito_ai/enterprise/litellm_client.py +144 -0
  22. mito_ai/enterprise/utils.py +16 -2
  23. mito_ai/log/handlers.py +1 -1
  24. mito_ai/openai_client.py +36 -96
  25. mito_ai/provider_manager.py +420 -0
  26. mito_ai/settings/enterprise_handler.py +26 -0
  27. mito_ai/settings/urls.py +2 -0
  28. mito_ai/streamlit_conversion/agent_utils.py +2 -30
  29. mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
  30. mito_ai/streamlit_preview/handlers.py +6 -3
  31. mito_ai/streamlit_preview/urls.py +5 -3
  32. mito_ai/tests/message_history/test_generate_short_chat_name.py +103 -28
  33. mito_ai/tests/open_ai_utils_test.py +34 -36
  34. mito_ai/tests/providers/test_anthropic_client.py +174 -16
  35. mito_ai/tests/providers/test_azure.py +15 -15
  36. mito_ai/tests/providers/test_capabilities.py +14 -17
  37. mito_ai/tests/providers/test_gemini_client.py +14 -13
  38. mito_ai/tests/providers/test_model_resolution.py +145 -89
  39. mito_ai/tests/providers/test_openai_client.py +209 -13
  40. mito_ai/tests/providers/test_provider_limits.py +5 -5
  41. mito_ai/tests/providers/test_providers.py +229 -51
  42. mito_ai/tests/providers/test_retry_logic.py +13 -22
  43. mito_ai/tests/providers/utils.py +4 -4
  44. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
  45. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
  46. mito_ai/tests/test_constants.py +90 -0
  47. mito_ai/tests/test_enterprise_mode.py +217 -0
  48. mito_ai/tests/test_model_utils.py +362 -0
  49. mito_ai/utils/anthropic_utils.py +8 -6
  50. mito_ai/utils/gemini_utils.py +0 -3
  51. mito_ai/utils/litellm_utils.py +84 -0
  52. mito_ai/utils/model_utils.py +257 -0
  53. mito_ai/utils/open_ai_utils.py +29 -41
  54. mito_ai/utils/provider_utils.py +13 -29
  55. mito_ai/utils/telemetry_utils.py +14 -2
  56. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
  57. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  58. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  59. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js +1059 -144
  60. mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.44c109c7be36fb884d25.js.map +1 -0
  61. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js +17 -17
  62. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map → mito_ai-0.1.59.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.f7decebaf69618541e0f.js.map +1 -1
  63. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +78 -78
  64. {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/METADATA +2 -1
  65. {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/RECORD +90 -83
  66. mito_ai/completions/providers.py +0 -284
  67. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +0 -1
  68. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  69. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  70. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  71. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  72. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  73. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  74. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  75. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  76. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  77. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  78. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  79. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  80. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  81. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  82. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  83. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  84. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  85. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  86. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  87. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  88. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  89. {mito_ai-0.1.57.data → mito_ai-0.1.59.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  90. {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/WHEEL +0 -0
  91. {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/entry_points.txt +0 -0
  92. {mito_ai-0.1.57.dist-info → mito_ai-0.1.59.dist-info}/licenses/LICENSE +0 -0
mito_ai/completions/completion_handlers/inline_completer_handler.py CHANGED
@@ -5,7 +5,7 @@ from typing import List
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import InlineCompleterMetadata, MessageType
 from mito_ai.completions.prompt_builders.inline_completer_prompt import create_inline_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 
@@ -17,9 +17,8 @@ class InlineCompleterHandler(CompletionHandler[InlineCompleterMetadata]):
     @staticmethod
     async def get_completion(
         metadata: InlineCompleterMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get an inline completion from the AI provider."""
 
@@ -37,9 +36,9 @@ class InlineCompleterHandler(CompletionHandler[InlineCompleterMetadata]):
         # Get the completion
         completion = await provider.request_completions(
             messages=messages,
-            model=model,
             message_type=MessageType.INLINE_COMPLETION,
-            thread_id=None
+            thread_id=None,
+            use_fast_model=True
         )
 
         return completion
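The same migration repeats across every completion handler below: the explicit `model` parameter is removed, and model selection moves into `ProviderManager`. As a minimal sketch (not code from the package) of how the new signature is invoked, assuming a `ProviderManager`, a `GlobalMessageHistory`, and the request metadata already exist:

```python
# Hypothetical call site; `metadata`, `provider_manager`, and `history`
# are assumed to be constructed elsewhere and are not part of this diff.
completion = await InlineCompleterHandler.get_completion(
    metadata=metadata,            # InlineCompleterMetadata from the frontend
    provider=provider_manager,    # ProviderManager now owns model selection
    message_history=history,      # shared GlobalMessageHistory instance
)
```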
mito_ai/completions/completion_handlers/scratchpad_result_handler.py CHANGED
@@ -5,7 +5,7 @@ from typing import List, Literal, Union
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import ScratchpadResultMetadata, MessageType, ResponseFormatInfo, AgentResponse
 from mito_ai.completions.prompt_builders.scratchpad_result_prompt import create_scratchpad_result_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_agent_system_message, create_ai_optimized_message
@@ -18,9 +18,8 @@ class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
     @staticmethod
     async def get_completion(
         metadata: ScratchpadResultMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a scratchpad result completion from the AI provider."""
 
@@ -31,7 +30,7 @@ class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
         )
 
         # Add the system message if it doesn't already exist
-        await append_agent_system_message(message_history, model, provider, metadata.threadId, True)
+        await append_agent_system_message(message_history, provider, metadata.threadId, True)
 
         # Create the prompt
         prompt = create_scratchpad_result_prompt(metadata)
@@ -41,12 +40,11 @@ class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
         new_ai_optimized_message = create_ai_optimized_message(prompt, None, None)
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
 
-        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
 
         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(metadata.threadId),
-            model=model,
             response_format_info=ResponseFormatInfo(
                 name='agent_response',
                 format=AgentResponse
@@ -58,7 +56,7 @@ class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
 
         ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
 
-        await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)
+        await message_history.append_message(ai_response_message, ai_response_message, provider, metadata.threadId)
 
         return completion
 
mito_ai/completions/completion_handlers/smart_debug_handler.py CHANGED
@@ -15,7 +15,7 @@ from mito_ai.completions.prompt_builders.smart_debug_prompt import (
     create_error_prompt,
     remove_inner_thoughts_from_message,
 )
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_chat_system_message
@@ -29,9 +29,8 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
     @staticmethod
     async def get_completion(
         metadata: SmartDebugMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a smart debug completion from the AI provider."""
 
@@ -43,7 +42,7 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
         thread_id = metadata.threadId
 
         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)
 
         # Create the prompt
         prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
@@ -53,13 +52,12 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
         )
 
         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_type=MessageType.SMART_DEBUG,
             user_input=error_message,
             thread_id=thread_id
@@ -78,7 +76,7 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
             "content": display_completion,
         }
         await message_history.append_message(
-            ai_response_message, display_response_message, model, provider, thread_id
+            ai_response_message, display_response_message, provider, thread_id
         )
 
         return display_completion
@@ -86,11 +84,10 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
     @staticmethod
     async def stream_completion(
         metadata: SmartDebugMetadata,
-        provider: OpenAIProvider,
+        provider: ProviderManager,
         message_history: GlobalMessageHistory,
         message_id: str,
-        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
-        model: str
+        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]
     ) -> str:
         """Stream smart debug completions from the AI provider.
 
@@ -112,7 +109,7 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
         thread_id = metadata.threadId
 
         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)
 
         # Create the prompt
         prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
@@ -122,18 +119,17 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
        )
 
         # Stream the completions using the provider's stream method
         accumulated_response = await provider.stream_completions(
             message_type=MessageType.SMART_DEBUG,
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_id=message_id,
+            thread_id=thread_id,
             reply_fn=reply_fn,
-            user_input=error_message,
-            thread_id=thread_id
+            user_input=error_message
         )
 
         # Process the completion to remove inner thoughts
@@ -149,7 +145,7 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
             "content": display_completion,
         }
         await message_history.append_message(
-            ai_response_message, display_response_message, model, provider, thread_id
+            ai_response_message, display_response_message, provider, thread_id
         )
 
         return display_completion
mito_ai/completions/completion_handlers/utils.py CHANGED
@@ -5,7 +5,7 @@ import base64
 from typing import Optional, Union, List, Dict, Any, cast
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.models import ThreadID
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.prompt_builders.chat_system_message import (
     create_chat_system_message_prompt,
@@ -17,8 +17,7 @@ from mito_ai.completions.prompt_builders.agent_system_message import (
 
 async def append_chat_system_message(
     message_history: GlobalMessageHistory,
-    model: str,
-    provider: OpenAIProvider,
+    provider: ProviderManager,
     thread_id: ThreadID,
 ) -> None:
 
@@ -39,7 +38,6 @@ async def append_chat_system_message(
         await message_history.append_message(
             ai_optimized_message=system_message,
             display_message=system_message,
-            model=model,
             llm_provider=provider,
             thread_id=thread_id,
         )
@@ -47,8 +45,7 @@ async def append_chat_system_message(
 
 async def append_agent_system_message(
     message_history: GlobalMessageHistory,
-    model: str,
-    provider: OpenAIProvider,
+    provider: ProviderManager,
     thread_id: ThreadID,
     isChromeBrowser: bool,
 ) -> None:
@@ -70,7 +67,6 @@ async def append_agent_system_message(
         await message_history.append_message(
             ai_optimized_message=system_message,
             display_message=system_message,
-            model=model,
             llm_provider=provider,
             thread_id=thread_id,
         )
mito_ai/completions/handlers.py CHANGED
@@ -37,9 +37,10 @@ from mito_ai.completions.models import (
     ScratchpadResultMetadata,
     MessageType
 )
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.utils.create import initialize_user
 from mito_ai.utils.version_utils import is_pro
+from mito_ai.utils.model_utils import get_available_models
 from mito_ai.completions.completion_handlers.chat_completion_handler import get_chat_completion, stream_chat_completion
 from mito_ai.completions.completion_handlers.smart_debug_handler import get_smart_debug_completion, stream_smart_debug_completion
 from mito_ai.completions.completion_handlers.code_explain_handler import get_code_explain_completion, stream_code_explain_completion
@@ -49,8 +50,6 @@ from mito_ai.completions.completion_handlers.agent_auto_error_fixup_handler impo
 from mito_ai.completions.completion_handlers.scratchpad_result_handler import get_scratchpad_result_completion
 from mito_ai.utils.telemetry_utils import identify
 
-FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
-
 # The GlobalMessageHistory is now created in __init__.py and passed to handlers
 # to ensure there's only one instance managing the .mito/ai-chats directory locks
 
@@ -61,13 +60,12 @@ FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
 class CompletionHandler(JupyterHandler, WebSocketHandler):
     """Completion websocket handler."""
 
-    def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
+    def initialize(self, llm: ProviderManager, message_history: GlobalMessageHistory) -> None:
         super().initialize()
         self.log.debug("Initializing websocket connection %s", self.request.path)
         self._llm = llm
         self._message_history = message_history
         self.is_pro = is_pro()
-        self._selected_model = FALLBACK_MODEL
         self.is_electron = False
         identify(llm.key_type)
 
@@ -202,7 +200,25 @@
         if type == MessageType.UPDATE_MODEL_CONFIG:
             model = metadata_dict.get('model')
             if model:
-                self._selected_model = model
+                # Validate model is in allowed list
+                available_models = get_available_models()
+                if model not in available_models:
+                    error = CompletionError(
+                        error_type="InvalidModelConfig",
+                        title="Invalid model configuration",
+                        traceback="",
+                        hint=f"Model '{model}' is not in the allowed model list. Available models: {', '.join(available_models)}"
+                    )
+                    reply = CompletionReply(
+                        items=[],
+                        error=error,
+                        parent_id=parsed_message.get('message_id')
+                    )
+                    self.reply(reply)
+                    return
+
+                # Set the model in ProviderManager
+                self._llm.set_selected_model(model)
                 self.log.info(f"Model updated to: {model}")
                 reply = CompletionReply(
                     items=[CompletionItem(content=f"Model updated to {model}", isIncomplete=False)],
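Since this path now rejects any model not returned by `get_available_models()`, a client update message that exercises it might look like the sketch below. The payload shape is inferred from the `parsed_message.get(...)` and `metadata_dict.get('model')` calls in the hunk above; the literal `type` value and the model name are assumptions for illustration only:

```python
# Hypothetical UPDATE_MODEL_CONFIG payload sent over the completion websocket.
update_model_message = {
    "type": "update_model_config",     # assumed wire value of MessageType.UPDATE_MODEL_CONFIG
    "message_id": "abc-123",           # echoed back as parent_id on error replies
    "metadata": {"model": "gpt-4.1"},  # rejected with InvalidModelConfig unless
                                       # "gpt-4.1" is in get_available_models()
}
```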
@@ -241,7 +257,6 @@
         await self._message_history.append_message(
             ai_optimized_message=ai_optimized_message,
             display_message=display_optimized_message,
-            model=self._selected_model,
             llm_provider=self._llm,
             thread_id=thread_id_to_stop
         )
@@ -255,8 +270,6 @@
         message_id = parsed_message.get('message_id')
         stream = parsed_message.get('stream')
 
-        # When handling completions, always use the selected model
-        model = self._selected_model
         if type == MessageType.CHAT:
             chat_metadata = ChatMessageMetadata(**metadata_dict)
 
@@ -268,13 +281,12 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
+                completion = await get_chat_completion(chat_metadata, self._llm, self._message_history)
         elif type == MessageType.SMART_DEBUG:
             smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
             # Handle streaming if requested and available
@@ -285,13 +297,12 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
+                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history)
         elif type == MessageType.CODE_EXPLAIN:
             code_explain_metadata = CodeExplainMetadata(**metadata_dict)
 
@@ -303,25 +314,24 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
+                completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history)
         elif type == MessageType.AGENT_EXECUTION:
             agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
-            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
+            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history)
         elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
             agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
-            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
+            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history)
         elif type == MessageType.AGENT_SCRATCHPAD_RESULT:
             scratchpad_result_metadata = ScratchpadResultMetadata(**metadata_dict)
-            completion = await get_scratchpad_result_completion(scratchpad_result_metadata, self._llm, self._message_history, model)
+            completion = await get_scratchpad_result_completion(scratchpad_result_metadata, self._llm, self._message_history)
         elif type == MessageType.INLINE_COMPLETION:
             inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
-            completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
+            completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history)
         else:
             raise ValueError(f"Invalid message type: {type}")
 
mito_ai/completions/message_history.py CHANGED
@@ -11,7 +11,7 @@ from typing import Dict, List, Optional
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import CompletionRequest, ChatThreadMetadata, MessageType, ThreadID
 from mito_ai.completions.prompt_builders.chat_name_prompt import create_chat_name_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.utils.schema import MITO_FOLDER
 from mito_ai.utils.message_history_utils import trim_old_messages
 
@@ -19,16 +19,15 @@ CHAT_HISTORY_VERSION = 2 # Increment this if the schema changes
 NEW_CHAT_NAME = "(New Chat)"
 NUMBER_OF_THREADS_CUT_OFF = 50
 
-async def generate_short_chat_name(user_message: str, assistant_message: str, model: str, llm_provider: OpenAIProvider) -> str:
+async def generate_short_chat_name(user_message: str, assistant_message: str, llm_provider: ProviderManager) -> str:
     prompt = create_chat_name_prompt(user_message, assistant_message)
 
     completion = await llm_provider.request_completions(
         messages=[{"role": "user", "content": prompt}],
-        # We set the model so we can use the correct model provider, but request_completions will decide to
-        # use the fast model from that provider to make the request.
-        model=model,
+        # Use fast model from the selected provider for chat name generation
         message_type=MessageType.CHAT_NAME_GENERATION,
-        thread_id=None
+        thread_id=None,
+        use_fast_model=True
     )
 
     # Do a little cleanup of the completion. Gemini seems to return the string
@@ -135,7 +134,7 @@ class GlobalMessageHistory:
         Returns the AI-optimized history for the specified thread or newest thread.
     get_display_history(thread_id: Optional[ThreadID] = None) -> List[ChatCompletionMessageParam]:
         Returns the display-optimized history for the specified thread or newest thread.
-    append_message(ai_optimized_message: ChatCompletionMessageParam, display_message: ChatCompletionMessageParam, llm_provider: OpenAIProvider, thread_id: Optional[ThreadID] = None) -> None:
+    append_message(ai_optimized_message: ChatCompletionMessageParam, display_message: ChatCompletionMessageParam, llm_provider: ProviderManager, thread_id: Optional[ThreadID] = None) -> None:
         Appends messages to the specified thread (or newest thread) and generates a name if needed.
     truncate_histories(index: int, thread_id: Optional[ThreadID] = None) -> None:
         Truncates messages at the given index for the specified thread.
@@ -265,8 +264,7 @@
         self,
         ai_optimized_message: ChatCompletionMessageParam,
         display_message: ChatCompletionMessageParam,
-        model: str,
-        llm_provider: OpenAIProvider,
+        llm_provider: ProviderManager,
         thread_id: ThreadID
     ) -> None:
         """
@@ -305,7 +303,7 @@
 
         # Outside the lock, await the name generation if needed
         if name_gen_input:
-            new_name = await generate_short_chat_name(str(name_gen_input[0]), str(name_gen_input[1]), model, llm_provider)
+            new_name = await generate_short_chat_name(str(name_gen_input[0]), str(name_gen_input[1]), llm_provider)
             with self._lock:
                 # Update the thread's name if still required
                 thread = self._chat_threads[thread_id]
mito_ai/completions/prompt_builders/chart_add_field_prompt.py ADDED
@@ -0,0 +1,35 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List
+from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
+from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection
+from mito_ai.completions.prompt_builders.prompt_constants import CHART_CONFIG_RULES
+
+def create_chart_add_field_prompt(code: str, user_description: str, existing_variables: List[str]) -> str:
+    """
+    Create a prompt for adding a new field to the chart configuration.
+
+    Args:
+        code: The current chart code
+        user_description: The user's description of what field they want to add
+        existing_variables: List of existing variable names in the config
+
+    Returns:
+        A formatted prompt string
+    """
+    sections: List[PromptSection] = []
+
+    sections.append(SG.Generic("Instructions", "The user wants to add a new field to the chart configuration. You need to:\n1. Understand what field the user wants to add based on their description\n2. Add the appropriate variable to the chart configuration section\n3. Use the variable in the chart code where appropriate\n4. Return the complete updated code\n\nIMPORTANT: If you cannot add the requested field (e.g., the request is unclear, ambiguous, or not applicable to chart configuration), do NOT return any code block. Simply respond with a brief explanation without including any Python code blocks."))
+
+    sections.append(SG.Generic("Chart Config Rules", CHART_CONFIG_RULES))
+
+    existing_vars_text = ", ".join(existing_variables) if existing_variables else "none"
+    sections.append(SG.Generic("Existing Variables", f"The following variables already exist in the chart configuration: {existing_vars_text}"))
+
+    sections.append(SG.Generic("User Request", f"The user wants to add a field for: {user_description}"))
+
+    sections.append(SG.Generic("Current Code", f"```python\n{code}\n```"))
+
+    prompt = Prompt(sections)
+    return str(prompt)
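For reference, a hypothetical invocation of the new builder; the argument values here are illustrative, not taken from the package:

```python
# Sketch: build an "add field" prompt for a simple chart script.
prompt = create_chart_add_field_prompt(
    code='TITLE = "Sales by Product"\nax.set_title(TITLE)',
    user_description="let me control the bar color",
    existing_variables=["TITLE"],
)
# The result is a sectioned prompt string: Instructions, Chart Config Rules,
# Existing Variables, User Request, and Current Code.
print(prompt)
```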
mito_ai/completions/prompt_builders/prompt_constants.py CHANGED
@@ -20,6 +20,8 @@ Rules:
 - All imports must appear at the top, before the chart configuration section.
 - Variables with multiple words should be underscore-separated.
 - All colors should be in hex format (e.g., "#3498db"). Use quotes around the hex string: COLOR = "#3498db" or COLOR = '#3498db'. Do NOT nest quotes.
+- Never use RGB/RGBA tuples/lists for colors (e.g. (0, 0.4, 0.8, 0.8) is forbidden).
+- If transparency is needed, store it separately as ALPHA = 0.8 and apply it in code (e.g. to_rgba(HEX_COLOR, ALPHA)).
 - Variables can only be strings, numbers, booleans, tuples, or lists.
 - NEVER include comments on the same line as a variable assignment. Each variable assignment must be on its own line with no trailing comments.
 - For string values, use either single or double quotes (e.g., TITLE = "Sales by Product" or TITLE = 'Sales by Product'). Do not use nested quotes (e.g., do NOT use '"value"').
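The two added rules separate color from opacity in generated chart configs. A minimal sketch of the pattern they prescribe, assuming the `to_rgba` the rule text mentions is matplotlib's:

```python
# Keep the hex color and the alpha as separate config variables, then
# combine them in the chart code rather than hard-coding an RGBA tuple.
from matplotlib.colors import to_rgba

COLOR = "#3498db"  # hex string, per the config rules
ALPHA = 0.8        # transparency stored separately

bar_color = to_rgba(COLOR, ALPHA)  # -> approximately (0.204, 0.596, 0.859, 0.8)
```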
mito_ai/constants.py CHANGED
@@ -2,10 +2,10 @@
 # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
 import os
-from typing import Union
+from typing import Union, List
 
 # Claude
-CLAUDE_API_KEY = os.environ.get("CLAUDE_API_KEY")
+ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
 
 # Gemini
 GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
@@ -23,6 +23,35 @@ AZURE_OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION")
 AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
 AZURE_OPENAI_MODEL = os.environ.get("AZURE_OPENAI_MODEL")
 
+def parse_comma_separated_models(models_str: str) -> List[str]:
+    """
+    Parse a comma-separated string of model names into a list.
+    Handles quoted and unquoted values, stripping whitespace and quotes.
+
+    Args:
+        models_str: Comma-separated string of model names (e.g., "model1,model2" or '"model1","model2"')
+
+    Returns:
+        List of model names with whitespace and quotes stripped
+    """
+    if not models_str:
+        return []
+    return [model.strip().strip('"\'') for model in models_str.split(",") if model.strip()]
+
+# LiteLLM Config (Enterprise mode only)
+LITELLM_BASE_URL = os.environ.get("LITELLM_BASE_URL")
+LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+LITELLM_MODELS_STR = os.environ.get("LITELLM_MODELS", "")
+# Parse comma-separated string into list, strip whitespace and quotes
+LITELLM_MODELS = parse_comma_separated_models(LITELLM_MODELS_STR)
+
+# Abacus AI Config (Enterprise mode only)
+ABACUS_BASE_URL = os.environ.get("ABACUS_BASE_URL")
+ABACUS_API_KEY = os.environ.get("ABACUS_API_KEY")
+ABACUS_MODELS_STR = os.environ.get("ABACUS_MODELS", "")
+# Parse comma-separated string into list, strip whitespace and quotes
+ABACUS_MODELS = parse_comma_separated_models(ABACUS_MODELS_STR)
+
 # Mito AI Base URLs and Endpoint Paths
 MITO_PROD_BASE_URL = "https://7eax4i53f5odkshhlry4gw23by0yvnuv.lambda-url.us-east-1.on.aws/v2"
 MITO_DEV_BASE_URL = "https://g5vwmogjg7gh7aktqezyrvcq6a0hyfnr.lambda-url.us-east-1.on.aws/v2"
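To make the parsing behavior concrete, a few illustrative inputs and their results (the model names are made up for the example):

```python
# Unquoted, with stray whitespace:
parse_comma_separated_models("gpt-4.1, claude-sonnet-4")
# -> ['gpt-4.1', 'claude-sonnet-4']

# Quoted values, as some deployment tooling emits:
parse_comma_separated_models('"gpt-4.1","claude-sonnet-4"')
# -> ['gpt-4.1', 'claude-sonnet-4']

# Unset or empty environment variable:
parse_comma_separated_models("")
# -> []
```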
mito_ai/enterprise/__init__.py CHANGED
@@ -1,3 +1,3 @@
 # Copyright (c) Saga Inc.
-# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+# Distributed under the terms of the Enterprise License at the root of this repository.
 