mito-ai 0.1.56__py3-none-any.whl → 0.1.58__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries, and is provided for informational purposes only.
Files changed (95)
  1. mito_ai/__init__.py +17 -21
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +24 -14
  4. mito_ai/chart_wizard/__init__.py +3 -0
  5. mito_ai/chart_wizard/handlers.py +113 -0
  6. mito_ai/chart_wizard/urls.py +26 -0
  7. mito_ai/completions/completion_handlers/agent_auto_error_fixup_handler.py +6 -8
  8. mito_ai/completions/completion_handlers/agent_execution_handler.py +6 -8
  9. mito_ai/completions/completion_handlers/chat_completion_handler.py +13 -17
  10. mito_ai/completions/completion_handlers/code_explain_handler.py +13 -17
  11. mito_ai/completions/completion_handlers/completion_handler.py +14 -7
  12. mito_ai/completions/completion_handlers/inline_completer_handler.py +5 -6
  13. mito_ai/completions/completion_handlers/scratchpad_result_handler.py +64 -0
  14. mito_ai/completions/completion_handlers/smart_debug_handler.py +13 -17
  15. mito_ai/completions/completion_handlers/utils.py +3 -7
  16. mito_ai/completions/handlers.py +36 -21
  17. mito_ai/completions/message_history.py +8 -10
  18. mito_ai/completions/models.py +23 -2
  19. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +5 -3
  20. mito_ai/completions/prompt_builders/agent_system_message.py +97 -5
  21. mito_ai/completions/prompt_builders/chart_add_field_prompt.py +35 -0
  22. mito_ai/completions/prompt_builders/chart_conversion_prompt.py +27 -0
  23. mito_ai/completions/prompt_builders/chat_system_message.py +2 -0
  24. mito_ai/completions/prompt_builders/prompt_constants.py +28 -0
  25. mito_ai/completions/prompt_builders/scratchpad_result_prompt.py +17 -0
  26. mito_ai/constants.py +8 -1
  27. mito_ai/enterprise/__init__.py +1 -1
  28. mito_ai/enterprise/litellm_client.py +137 -0
  29. mito_ai/log/handlers.py +1 -1
  30. mito_ai/openai_client.py +10 -90
  31. mito_ai/{completions/providers.py → provider_manager.py} +157 -53
  32. mito_ai/settings/enterprise_handler.py +26 -0
  33. mito_ai/settings/urls.py +2 -0
  34. mito_ai/streamlit_conversion/agent_utils.py +2 -30
  35. mito_ai/streamlit_conversion/streamlit_agent_handler.py +48 -46
  36. mito_ai/streamlit_preview/handlers.py +6 -3
  37. mito_ai/streamlit_preview/urls.py +5 -3
  38. mito_ai/tests/message_history/test_generate_short_chat_name.py +72 -28
  39. mito_ai/tests/providers/test_anthropic_client.py +174 -16
  40. mito_ai/tests/providers/test_azure.py +13 -13
  41. mito_ai/tests/providers/test_capabilities.py +14 -17
  42. mito_ai/tests/providers/test_gemini_client.py +14 -13
  43. mito_ai/tests/providers/test_model_resolution.py +145 -89
  44. mito_ai/tests/providers/test_openai_client.py +209 -13
  45. mito_ai/tests/providers/test_provider_limits.py +5 -5
  46. mito_ai/tests/providers/test_providers.py +229 -51
  47. mito_ai/tests/providers/test_retry_logic.py +13 -22
  48. mito_ai/tests/providers/utils.py +4 -4
  49. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +57 -85
  50. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +4 -1
  51. mito_ai/tests/test_enterprise_mode.py +162 -0
  52. mito_ai/tests/test_model_utils.py +271 -0
  53. mito_ai/utils/anthropic_utils.py +8 -6
  54. mito_ai/utils/gemini_utils.py +0 -3
  55. mito_ai/utils/litellm_utils.py +84 -0
  56. mito_ai/utils/model_utils.py +178 -0
  57. mito_ai/utils/open_ai_utils.py +0 -8
  58. mito_ai/utils/provider_utils.py +6 -21
  59. mito_ai/utils/telemetry_utils.py +14 -2
  60. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
  61. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  62. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  63. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js +2992 -282
  64. mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.03302cc521d72eb56b00.js.map +1 -0
  65. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js +17 -17
  66. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.1e7b5cf362385f109883.js.map → mito_ai-0.1.58.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.570df809a692f53a7ab7.js.map +1 -1
  67. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +7 -2
  68. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/METADATA +2 -1
  69. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/RECORD +94 -81
  70. mito_ai-0.1.56.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.dfd7975de75d64db80d6.js.map +0 -1
  71. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  72. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  73. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  74. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  75. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  76. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  77. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  78. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  79. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  80. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  81. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  82. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  83. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  84. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  85. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  86. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  87. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  88. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  89. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  90. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  91. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  92. {mito_ai-0.1.56.data → mito_ai-0.1.58.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  93. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/WHEEL +0 -0
  94. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/entry_points.txt +0 -0
  95. {mito_ai-0.1.56.dist-info → mito_ai-0.1.58.dist-info}/licenses/LICENSE +0 -0
mito_ai/completions/completion_handlers/completion_handler.py

@@ -3,11 +3,20 @@

 from typing import Protocol, TypeVar
 from abc import abstractmethod, ABCMeta
-from mito_ai.completions.models import ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.completions.models import ChatMessageMetadata, ScratchpadResultMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory

-T = TypeVar('T', ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, AgentSmartDebugMetadata, InlineCompleterMetadata, contravariant=True)
+T = TypeVar('T',
+    ChatMessageMetadata,
+    SmartDebugMetadata,
+    CodeExplainMetadata,
+    AgentExecutionMetadata,
+    AgentSmartDebugMetadata,
+    InlineCompleterMetadata,
+    ScratchpadResultMetadata,
+    contravariant=True
+)

 class CompletionHandler(Protocol[T], metaclass=ABCMeta):
     """Protocol defining the interface for completion handlers.
@@ -20,9 +29,8 @@ class CompletionHandler(Protocol[T], metaclass=ABCMeta):
     @abstractmethod
     async def get_completion(
         metadata: T,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a completion from the AI provider.

@@ -30,7 +38,6 @@ class CompletionHandler(Protocol[T], metaclass=ABCMeta):
            metadata: Metadata about the completion request
            provider: The AI provider to use
            message_history: The history of messages in the conversation
-           model: The model to use for the completion

        Returns:
            The completion string from the AI
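Read together, these hunks change the contract every completion handler implements: ProviderManager replaces OpenAIProvider, and the per-call model argument is gone because the provider now tracks the selected model itself. A minimal sketch of a handler written against the new protocol, using only the signatures visible in this diff; EchoHandler itself is hypothetical:

# Minimal sketch of a handler conforming to the updated protocol. The
# imports and request_completions keywords mirror hunks in this diff;
# EchoHandler is a hypothetical example, not part of mito-ai.
from mito_ai.provider_manager import ProviderManager
from mito_ai.completions.message_history import GlobalMessageHistory
from mito_ai.completions.models import ChatMessageMetadata, MessageType
from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler

class EchoHandler(CompletionHandler[ChatMessageMetadata]):
    """Hypothetical handler illustrating the new signature."""

    @staticmethod
    async def get_completion(
        metadata: ChatMessageMetadata,
        provider: ProviderManager,
        message_history: GlobalMessageHistory
    ) -> str:
        # The provider resolves the model internally (via set_selected_model),
        # so the handler only describes the request.
        return await provider.request_completions(
            messages=[{"role": "user", "content": "Hello"}],
            message_type=MessageType.CHAT,
            thread_id=None
        )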
mito_ai/completions/completion_handlers/inline_completer_handler.py

@@ -5,7 +5,7 @@ from typing import List
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import InlineCompleterMetadata, MessageType
 from mito_ai.completions.prompt_builders.inline_completer_prompt import create_inline_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler

@@ -17,9 +17,8 @@ class InlineCompleterHandler(CompletionHandler[InlineCompleterMetadata]):
     @staticmethod
     async def get_completion(
         metadata: InlineCompleterMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get an inline completion from the AI provider."""

@@ -37,9 +36,9 @@ class InlineCompleterHandler(CompletionHandler[InlineCompleterMetadata]):
         # Get the completion
         completion = await provider.request_completions(
             messages=messages,
-            model=model,
             message_type=MessageType.INLINE_COMPLETION,
-            thread_id=None
+            thread_id=None,
+            use_fast_model=True
         )

         return completion
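The last hunk above also shows what replaces per-call model strings: latency-sensitive callers now opt into the provider's fast model explicitly. A before/after migration sketch; that use_fast_model defaults to False when omitted is an assumption, not confirmed by this diff:

# Migration sketch for request_completions callers. In 0.1.56 the caller
# passed model=...; in 0.1.58 ProviderManager resolves the model and the
# caller may opt into the fast model per call.
from typing import List
from openai.types.chat import ChatCompletionMessageParam
from mito_ai.provider_manager import ProviderManager
from mito_ai.completions.models import MessageType

async def fast_completion(
    provider: ProviderManager,
    messages: List[ChatCompletionMessageParam]
) -> str:
    return await provider.request_completions(
        messages=messages,
        message_type=MessageType.INLINE_COMPLETION,
        thread_id=None,
        use_fast_model=True  # assumed to default to False when omitted
    )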
mito_ai/completions/completion_handlers/scratchpad_result_handler.py (new file)

@@ -0,0 +1,64 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import List, Literal, Union
+from openai.types.chat import ChatCompletionMessageParam
+from mito_ai.completions.models import ScratchpadResultMetadata, MessageType, ResponseFormatInfo, AgentResponse
+from mito_ai.completions.prompt_builders.scratchpad_result_prompt import create_scratchpad_result_prompt
+from mito_ai.provider_manager import ProviderManager
+from mito_ai.completions.message_history import GlobalMessageHistory
+from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
+from mito_ai.completions.completion_handlers.utils import append_agent_system_message, create_ai_optimized_message
+
+__all__ = ["get_scratchpad_result_completion"]
+
+class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
+    """Handler for scratchpad result completions."""
+
+    @staticmethod
+    async def get_completion(
+        metadata: ScratchpadResultMetadata,
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
+    ) -> str:
+        """Get a scratchpad result completion from the AI provider."""
+
+        if metadata.index is not None:
+            message_history.truncate_histories(
+                thread_id=metadata.threadId,
+                index=metadata.index
+            )
+
+        # Add the system message if it doesn't already exist
+        await append_agent_system_message(message_history, provider, metadata.threadId, True)
+
+        # Create the prompt
+        prompt = create_scratchpad_result_prompt(metadata)
+        display_prompt = ""
+
+        # Add the prompt to the message history
+        new_ai_optimized_message = create_ai_optimized_message(prompt, None, None)
+        new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
+
+        await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, provider, metadata.threadId)
+
+        # Get the completion
+        completion = await provider.request_completions(
+            messages=message_history.get_ai_optimized_history(metadata.threadId),
+            response_format_info=ResponseFormatInfo(
+                name='agent_response',
+                format=AgentResponse
+            ),
+            message_type=MessageType.AGENT_SCRATCHPAD_RESULT,
+            user_input="",
+            thread_id=metadata.threadId
+        )
+
+        ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
+
+        await message_history.append_message(ai_response_message, ai_response_message, provider, metadata.threadId)
+
+        return completion
+
+# Use the static method directly
+get_scratchpad_result_completion = ScratchpadResultHandler.get_completion
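A hypothetical invocation of the new handler. The field names come from ScratchpadResultMetadata, added to models.py further down in this diff; the literal values are placeholders:

# Hypothetical caller for the new scratchpad-result handler. Field names
# match ScratchpadResultMetadata in models.py; the values are placeholders.
from mito_ai.completions.models import ScratchpadResultMetadata
from mito_ai.completions.completion_handlers.scratchpad_result_handler import (
    get_scratchpad_result_completion,
)
from mito_ai.completions.message_history import GlobalMessageHistory
from mito_ai.provider_manager import ProviderManager

async def report_scratchpad_result(
    provider: ProviderManager,
    message_history: GlobalMessageHistory
) -> str:
    metadata = ScratchpadResultMetadata(
        promptType='agent:scratchpad-result',
        threadId='thread-123',                      # placeholder thread id
        scratchpadResult='df.shape -> (1000, 12)',  # output of the scratchpad code
        index=None                                  # skip history truncation
    )
    return await get_scratchpad_result_completion(metadata, provider, message_history)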
mito_ai/completions/completion_handlers/smart_debug_handler.py

@@ -15,7 +15,7 @@ from mito_ai.completions.prompt_builders.smart_debug_prompt import (
     create_error_prompt,
     remove_inner_thoughts_from_message,
 )
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
 from mito_ai.completions.completion_handlers.utils import append_chat_system_message
@@ -29,9 +29,8 @@ class SmartDebugHandler(CompletionHandler[SmartDebugMetadata]):
     @staticmethod
     async def get_completion(
         metadata: SmartDebugMetadata,
-        provider: OpenAIProvider,
-        message_history: GlobalMessageHistory,
-        model: str
+        provider: ProviderManager,
+        message_history: GlobalMessageHistory
     ) -> str:
         """Get a smart debug completion from the AI provider."""

@@ -43,7 +42,7 @@
         thread_id = metadata.threadId

         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)

         # Create the prompt
         prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
@@ -53,13 +52,12 @@
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
         )

         # Get the completion
         completion = await provider.request_completions(
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_type=MessageType.SMART_DEBUG,
             user_input=error_message,
             thread_id=thread_id
@@ -78,7 +76,7 @@
             "content": display_completion,
         }
         await message_history.append_message(
-            ai_response_message, display_response_message, model, provider, thread_id
+            ai_response_message, display_response_message, provider, thread_id
         )

         return display_completion
@@ -86,11 +84,10 @@
     @staticmethod
     async def stream_completion(
         metadata: SmartDebugMetadata,
-        provider: OpenAIProvider,
+        provider: ProviderManager,
         message_history: GlobalMessageHistory,
         message_id: str,
-        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None],
-        model: str
+        reply_fn: Callable[[Union[CompletionReply, CompletionStreamChunk]], None]
     ) -> str:
         """Stream smart debug completions from the AI provider.

@@ -112,7 +109,7 @@
         thread_id = metadata.threadId

         # Add the system message if it doesn't already exist
-        await append_chat_system_message(message_history, model, provider, thread_id)
+        await append_chat_system_message(message_history, provider, thread_id)

         # Create the prompt
         prompt = create_error_prompt(error_message, active_cell_code, active_cell_id, variables, files)
@@ -122,18 +119,17 @@
         new_ai_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": prompt}
         new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
         await message_history.append_message(
-            new_ai_optimized_message, new_display_optimized_message, model, provider, thread_id
+            new_ai_optimized_message, new_display_optimized_message, provider, thread_id
         )

         # Stream the completions using the provider's stream method
         accumulated_response = await provider.stream_completions(
             message_type=MessageType.SMART_DEBUG,
             messages=message_history.get_ai_optimized_history(thread_id),
-            model=model,
             message_id=message_id,
+            thread_id=thread_id,
             reply_fn=reply_fn,
-            user_input=error_message,
-            thread_id=thread_id
+            user_input=error_message
         )

         # Process the completion to remove inner thoughts
@@ -149,7 +145,7 @@
             "content": display_completion,
         }
         await message_history.append_message(
-            ai_response_message, display_response_message, model, provider, thread_id
+            ai_response_message, display_response_message, provider, thread_id
         )

         return display_completion
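The streaming variant keeps its shape but likewise drops model. A minimal reply_fn matching the Callable[[Union[CompletionReply, CompletionStreamChunk]], None] annotation above; the import location of the two reply types is assumed, since this diff does not show it:

# Minimal reply_fn conforming to the stream_completion signature above.
# The real handler forwards chunks over a websocket; printing is a
# stand-in. The import path of the reply types is an assumption.
from typing import Union
from mito_ai.completions.models import CompletionReply, CompletionStreamChunk

def print_reply(msg: Union[CompletionReply, CompletionStreamChunk]) -> None:
    print(msg)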
mito_ai/completions/completion_handlers/utils.py

@@ -5,7 +5,7 @@ import base64
 from typing import Optional, Union, List, Dict, Any, cast
 from mito_ai.completions.message_history import GlobalMessageHistory
 from mito_ai.completions.models import ThreadID
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.prompt_builders.chat_system_message import (
     create_chat_system_message_prompt,
@@ -17,8 +17,7 @@ from mito_ai.completions.prompt_builders.agent_system_message import (

 async def append_chat_system_message(
     message_history: GlobalMessageHistory,
-    model: str,
-    provider: OpenAIProvider,
+    provider: ProviderManager,
     thread_id: ThreadID,
 ) -> None:

@@ -39,7 +38,6 @@
     await message_history.append_message(
         ai_optimized_message=system_message,
         display_message=system_message,
-        model=model,
         llm_provider=provider,
         thread_id=thread_id,
     )
@@ -47,8 +45,7 @@

 async def append_agent_system_message(
     message_history: GlobalMessageHistory,
-    model: str,
-    provider: OpenAIProvider,
+    provider: ProviderManager,
     thread_id: ThreadID,
     isChromeBrowser: bool,
 ) -> None:
@@ -70,7 +67,6 @@
     await message_history.append_message(
         ai_optimized_message=system_message,
         display_message=system_message,
-        model=model,
         llm_provider=provider,
         thread_id=thread_id,
     )
mito_ai/completions/handlers.py

@@ -34,21 +34,22 @@ from mito_ai.completions.models import (
     CodeExplainMetadata,
     AgentExecutionMetadata,
     InlineCompleterMetadata,
+    ScratchpadResultMetadata,
     MessageType
 )
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.utils.create import initialize_user
 from mito_ai.utils.version_utils import is_pro
+from mito_ai.utils.model_utils import get_available_models
 from mito_ai.completions.completion_handlers.chat_completion_handler import get_chat_completion, stream_chat_completion
 from mito_ai.completions.completion_handlers.smart_debug_handler import get_smart_debug_completion, stream_smart_debug_completion
 from mito_ai.completions.completion_handlers.code_explain_handler import get_code_explain_completion, stream_code_explain_completion
 from mito_ai.completions.completion_handlers.inline_completer_handler import get_inline_completion
 from mito_ai.completions.completion_handlers.agent_execution_handler import get_agent_execution_completion
 from mito_ai.completions.completion_handlers.agent_auto_error_fixup_handler import get_agent_auto_error_fixup_completion
+from mito_ai.completions.completion_handlers.scratchpad_result_handler import get_scratchpad_result_completion
 from mito_ai.utils.telemetry_utils import identify

-FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
-
 # The GlobalMessageHistory is now created in __init__.py and passed to handlers
 # to ensure there's only one instance managing the .mito/ai-chats directory locks

@@ -59,13 +60,12 @@ FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
 class CompletionHandler(JupyterHandler, WebSocketHandler):
     """Completion websocket handler."""

-    def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
+    def initialize(self, llm: ProviderManager, message_history: GlobalMessageHistory) -> None:
         super().initialize()
         self.log.debug("Initializing websocket connection %s", self.request.path)
         self._llm = llm
         self._message_history = message_history
         self.is_pro = is_pro()
-        self._selected_model = FALLBACK_MODEL
         self.is_electron = False
         identify(llm.key_type)

@@ -200,7 +200,25 @@
         if type == MessageType.UPDATE_MODEL_CONFIG:
             model = metadata_dict.get('model')
             if model:
-                self._selected_model = model
+                # Validate model is in allowed list
+                available_models = get_available_models()
+                if model not in available_models:
+                    error = CompletionError(
+                        error_type="InvalidModelConfig",
+                        title="Invalid model configuration",
+                        traceback="",
+                        hint=f"Model '{model}' is not in the allowed model list. Available models: {', '.join(available_models)}"
+                    )
+                    reply = CompletionReply(
+                        items=[],
+                        error=error,
+                        parent_id=parsed_message.get('message_id')
+                    )
+                    self.reply(reply)
+                    return
+
+                # Set the model in ProviderManager
+                self._llm.set_selected_model(model)
                 self.log.info(f"Model updated to: {model}")
                 reply = CompletionReply(
                     items=[CompletionItem(content=f"Model updated to {model}", isIncomplete=False)],
@@ -239,7 +257,6 @@
             await self._message_history.append_message(
                 ai_optimized_message=ai_optimized_message,
                 display_message=display_optimized_message,
-                model=self._selected_model,
                 llm_provider=self._llm,
                 thread_id=thread_id_to_stop
             )
@@ -253,8 +270,6 @@
         message_id = parsed_message.get('message_id')
         stream = parsed_message.get('stream')

-        # When handling completions, always use the selected model
-        model = self._selected_model
         if type == MessageType.CHAT:
             chat_metadata = ChatMessageMetadata(**metadata_dict)

@@ -266,13 +281,12 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
+                completion = await get_chat_completion(chat_metadata, self._llm, self._message_history)
         elif type == MessageType.SMART_DEBUG:
             smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
             # Handle streaming if requested and available
@@ -283,13 +297,12 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
+                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history)
         elif type == MessageType.CODE_EXPLAIN:
             code_explain_metadata = CodeExplainMetadata(**metadata_dict)

@@ -301,22 +314,24 @@
                     self._llm,
                     self._message_history,
                     message_id,
-                    self.reply,
-                    model
+                    self.reply
                 )
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
+                completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history)
         elif type == MessageType.AGENT_EXECUTION:
             agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
-            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
+            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history)
         elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
             agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
-            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
+            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history)
+        elif type == MessageType.AGENT_SCRATCHPAD_RESULT:
+            scratchpad_result_metadata = ScratchpadResultMetadata(**metadata_dict)
+            completion = await get_scratchpad_result_completion(scratchpad_result_metadata, self._llm, self._message_history)
         elif type == MessageType.INLINE_COMPLETION:
             inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
-            completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
+            completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history)
         else:
             raise ValueError(f"Invalid message type: {type}")

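The UPDATE_MODEL_CONFIG branch is the heart of the change: the selected model now lives on the ProviderManager and is validated against an allow-list before being set. The same flow in isolation, with the websocket plumbing stripped out; the wrapper function is illustrative, not part of mito-ai:

# The new model-update flow in isolation. get_available_models and
# set_selected_model come from this diff; the standalone wrapper is
# illustrative only.
from mito_ai.provider_manager import ProviderManager
from mito_ai.utils.model_utils import get_available_models

def try_update_model(llm: ProviderManager, requested: str) -> bool:
    """Return True if the requested model was accepted and selected."""
    available = get_available_models()
    if requested not in available:
        # The websocket handler replies with an InvalidModelConfig error here.
        return False
    llm.set_selected_model(requested)
    return True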
mito_ai/completions/message_history.py

@@ -11,7 +11,7 @@ from typing import Dict, List, Optional
 from openai.types.chat import ChatCompletionMessageParam
 from mito_ai.completions.models import CompletionRequest, ChatThreadMetadata, MessageType, ThreadID
 from mito_ai.completions.prompt_builders.chat_name_prompt import create_chat_name_prompt
-from mito_ai.completions.providers import OpenAIProvider
+from mito_ai.provider_manager import ProviderManager
 from mito_ai.utils.schema import MITO_FOLDER
 from mito_ai.utils.message_history_utils import trim_old_messages

@@ -19,16 +19,15 @@ CHAT_HISTORY_VERSION = 2 # Increment this if the schema changes
 NEW_CHAT_NAME = "(New Chat)"
 NUMBER_OF_THREADS_CUT_OFF = 50

-async def generate_short_chat_name(user_message: str, assistant_message: str, model: str, llm_provider: OpenAIProvider) -> str:
+async def generate_short_chat_name(user_message: str, assistant_message: str, llm_provider: ProviderManager) -> str:
     prompt = create_chat_name_prompt(user_message, assistant_message)

     completion = await llm_provider.request_completions(
         messages=[{"role": "user", "content": prompt}],
-        # We set the model so we can use the correct model provider, but request_completions will decide to
-        # use the fast model from that provider to make the request.
-        model=model,
+        # Use fast model from the selected provider for chat name generation
         message_type=MessageType.CHAT_NAME_GENERATION,
-        thread_id=None
+        thread_id=None,
+        use_fast_model=True
     )

     # Do a little cleanup of the completion. Gemini seems to return the string
@@ -135,7 +134,7 @@
         Returns the AI-optimized history for the specified thread or newest thread.
     get_display_history(thread_id: Optional[ThreadID] = None) -> List[ChatCompletionMessageParam]:
         Returns the display-optimized history for the specified thread or newest thread.
-    append_message(ai_optimized_message: ChatCompletionMessageParam, display_message: ChatCompletionMessageParam, llm_provider: OpenAIProvider, thread_id: Optional[ThreadID] = None) -> None:
+    append_message(ai_optimized_message: ChatCompletionMessageParam, display_message: ChatCompletionMessageParam, llm_provider: ProviderManager, thread_id: Optional[ThreadID] = None) -> None:
         Appends messages to the specified thread (or newest thread) and generates a name if needed.
     truncate_histories(index: int, thread_id: Optional[ThreadID] = None) -> None:
         Truncates messages at the given index for the specified thread.
@@ -265,8 +264,7 @@
         self,
         ai_optimized_message: ChatCompletionMessageParam,
         display_message: ChatCompletionMessageParam,
-        model: str,
-        llm_provider: OpenAIProvider,
+        llm_provider: ProviderManager,
         thread_id: ThreadID
     ) -> None:
         """
@@ -305,7 +303,7 @@

         # Outside the lock, await the name generation if needed
         if name_gen_input:
-            new_name = await generate_short_chat_name(str(name_gen_input[0]), str(name_gen_input[1]), model, llm_provider)
+            new_name = await generate_short_chat_name(str(name_gen_input[0]), str(name_gen_input[1]), llm_provider)
             with self._lock:
                 # Update the thread's name if still required
                 thread = self._chat_threads[thread_id]
mito_ai/completions/models.py

@@ -29,13 +29,26 @@ class CellUpdate(BaseModel):
 # for now and rely on the AI to respond with the correct types, following the format
 # that we show it in the system prompt.
 class AgentResponse(BaseModel):
-    type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task', 'create_streamlit_app', 'edit_streamlit_app']
+    type: Literal[
+        'cell_update',
+        'get_cell_output',
+        'run_all_cells',
+        'finished_task',
+        'create_streamlit_app',
+        'edit_streamlit_app',
+        'ask_user_question',
+        'scratchpad',
+    ]
     message: str
     cell_update: Optional[CellUpdate]
     get_cell_output_cell_id: Optional[str]
     next_steps: Optional[List[str]]
     analysis_assumptions: Optional[List[str]]
     streamlit_app_prompt: Optional[str]
+    question: Optional[str]
+    answers: Optional[List[str]]
+    scratchpad_code: Optional[str]
+    scratchpad_summary: Optional[str]


 @dataclass(frozen=True)
@@ -67,6 +80,7 @@ class MessageType(Enum):
     STREAMLIT_CONVERSION = "streamlit_conversion"
     STOP_AGENT = "stop_agent"
     DEPLOY_APP = "deploy_app"
+    AGENT_SCRATCHPAD_RESULT = "agent:scratchpad-result"


 @dataclass(frozen=True)
@@ -136,13 +150,20 @@ class CodeExplainMetadata():
     activeCellCode: Optional[str] = None

 @dataclass(frozen=True)
-class InlineCompleterMetadata():
+class InlineCompleterMetadata():
     promptType: Literal['inline_completion']
     prefix: str
     suffix: str
     variables: Optional[List[str]] = None
     files: Optional[List[str]] = None

+@dataclass(frozen=True)
+class ScratchpadResultMetadata():
+    promptType: Literal['agent:scratchpad-result']
+    threadId: ThreadID
+    scratchpadResult: str
+    index: Optional[int] = None
+
 @dataclass(frozen=True)
 class CompletionRequest:
     """
mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py

@@ -22,12 +22,12 @@ def create_agent_smart_debug_prompt(md: AgentSmartDebugMetadata) -> str:

 Use this strategy for this message only. After this message, continue using the original set of instructions that I provided you.

-It is very important that When fixing this error, you do not change the original intent of the code cell.
+It is very important that when fixing this error, you do not change the original intent of the code cell.

 To fix this error, take the following approach:
 Step 1: ERROR ANALYSIS: Analyze the error message to identify why the code cell errored.
 Step 2: INTENT PRESERVATION: Make sure you understand the intent of the CELL_UPDATE so that you can be sure to preserve it when you create a new CELL_UPDATE
-Step 3: ERROR CORRECTION: Respond with a new CELL_UPDATE that is applied to the same cell as the erroring CELL_UPDATE.
+Step 3: ERROR CORRECTION: Respond with a new CELL_UPDATE that is applied to the same cell as the erroring CELL_UPDATE or use the ASK_USER_QUESTION tool to get more information about how to proceed.

 INSTRUCTIONS FOR EACH PHASE

@@ -43,13 +43,15 @@ INTENT PRESERVATION:

 ERROR CORRECTION:

-- Return the full, updated version of cell {md.error_message_producing_code_cell_id} with the error fixed and a short explanation of the error.
+- Use one of your tools to correct the error or get more information from the user on how to proceed.
+- If you use the CELL_UPDATE tool, you must reutn the full updated version of cell {md.error_message_producing_code_cell_id} with the error fixed and a short explanation of the error.
 - You can only update code in {md.error_message_producing_code_cell_id}. You are unable to edit the code in any other cell when resolving this error.
 - Propose a solution that fixes the error and does not change the user's intent.
 - Make the solution as simple as possible.
 - Reuse as much of the existing code as possible.
 - DO NOT ADD TEMPORARY COMMENTS like '# Fixed the typo here' or '# Added this line to fix the error'
 - If you encounter a ModuleNotFoundError, you can install the package by adding the the following line to the top of the code cell: `!pip install <package_name> --quiet`.
+- If the error is not resolvable without getting more information from the user, you can respond with a ASK_USER_QUESTION tool call.
 - If you encounter a NameError, you can use the RUN_ALL_CELLS tool to run all cells from the top of the notebook to the bottom to bring the variable into scope.
 RUN_ALL_CELLS:
 When you want to execute all cells in the notebook from top to bottom, respond with this format: