mito-ai 0.1.33__py3-none-any.whl → 0.1.49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. mito_ai/__init__.py +49 -9
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +142 -67
  4. mito_ai/{app_builder → app_deploy}/__init__.py +1 -1
  5. mito_ai/app_deploy/app_deploy_utils.py +44 -0
  6. mito_ai/app_deploy/handlers.py +345 -0
  7. mito_ai/{app_builder → app_deploy}/models.py +35 -22
  8. mito_ai/app_manager/__init__.py +4 -0
  9. mito_ai/app_manager/handlers.py +167 -0
  10. mito_ai/app_manager/models.py +71 -0
  11. mito_ai/app_manager/utils.py +24 -0
  12. mito_ai/auth/README.md +18 -0
  13. mito_ai/auth/__init__.py +6 -0
  14. mito_ai/auth/handlers.py +96 -0
  15. mito_ai/auth/urls.py +13 -0
  16. mito_ai/chat_history/handlers.py +63 -0
  17. mito_ai/chat_history/urls.py +32 -0
  18. mito_ai/completions/completion_handlers/agent_execution_handler.py +1 -1
  19. mito_ai/completions/completion_handlers/chat_completion_handler.py +4 -4
  20. mito_ai/completions/completion_handlers/utils.py +99 -37
  21. mito_ai/completions/handlers.py +57 -20
  22. mito_ai/completions/message_history.py +9 -1
  23. mito_ai/completions/models.py +31 -7
  24. mito_ai/completions/prompt_builders/agent_execution_prompt.py +21 -2
  25. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +8 -0
  26. mito_ai/completions/prompt_builders/agent_system_message.py +115 -42
  27. mito_ai/completions/prompt_builders/chat_name_prompt.py +6 -6
  28. mito_ai/completions/prompt_builders/chat_prompt.py +18 -11
  29. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  30. mito_ai/completions/prompt_builders/prompt_constants.py +23 -4
  31. mito_ai/completions/prompt_builders/utils.py +72 -10
  32. mito_ai/completions/providers.py +81 -47
  33. mito_ai/constants.py +25 -24
  34. mito_ai/file_uploads/__init__.py +3 -0
  35. mito_ai/file_uploads/handlers.py +248 -0
  36. mito_ai/file_uploads/urls.py +21 -0
  37. mito_ai/gemini_client.py +44 -48
  38. mito_ai/log/handlers.py +10 -3
  39. mito_ai/log/urls.py +3 -3
  40. mito_ai/openai_client.py +30 -44
  41. mito_ai/path_utils.py +70 -0
  42. mito_ai/streamlit_conversion/agent_utils.py +37 -0
  43. mito_ai/streamlit_conversion/prompts/prompt_constants.py +172 -0
  44. mito_ai/streamlit_conversion/prompts/prompt_utils.py +10 -0
  45. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +46 -0
  46. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +28 -0
  47. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +45 -0
  48. mito_ai/streamlit_conversion/prompts/streamlit_system_prompt.py +56 -0
  49. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  50. mito_ai/streamlit_conversion/search_replace_utils.py +94 -0
  51. mito_ai/streamlit_conversion/streamlit_agent_handler.py +144 -0
  52. mito_ai/streamlit_conversion/streamlit_utils.py +85 -0
  53. mito_ai/streamlit_conversion/validate_streamlit_app.py +105 -0
  54. mito_ai/streamlit_preview/__init__.py +6 -0
  55. mito_ai/streamlit_preview/handlers.py +111 -0
  56. mito_ai/streamlit_preview/manager.py +152 -0
  57. mito_ai/streamlit_preview/urls.py +22 -0
  58. mito_ai/streamlit_preview/utils.py +29 -0
  59. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  60. mito_ai/tests/completions/completion_handlers_utils_test.py +190 -0
  61. mito_ai/tests/deploy_app/test_app_deploy_utils.py +89 -0
  62. mito_ai/tests/file_uploads/__init__.py +2 -0
  63. mito_ai/tests/file_uploads/test_handlers.py +282 -0
  64. mito_ai/tests/message_history/test_generate_short_chat_name.py +0 -4
  65. mito_ai/tests/message_history/test_message_history_utils.py +103 -23
  66. mito_ai/tests/open_ai_utils_test.py +18 -22
  67. mito_ai/tests/providers/test_anthropic_client.py +447 -0
  68. mito_ai/tests/providers/test_azure.py +2 -6
  69. mito_ai/tests/providers/test_capabilities.py +120 -0
  70. mito_ai/tests/{test_gemini_client.py → providers/test_gemini_client.py} +40 -36
  71. mito_ai/tests/providers/test_mito_server_utils.py +448 -0
  72. mito_ai/tests/providers/test_model_resolution.py +130 -0
  73. mito_ai/tests/providers/test_openai_client.py +57 -0
  74. mito_ai/tests/providers/test_provider_completion_exception.py +66 -0
  75. mito_ai/tests/providers/test_provider_limits.py +42 -0
  76. mito_ai/tests/providers/test_providers.py +382 -0
  77. mito_ai/tests/providers/test_retry_logic.py +389 -0
  78. mito_ai/tests/providers/test_stream_mito_server_utils.py +140 -0
  79. mito_ai/tests/providers/utils.py +85 -0
  80. mito_ai/tests/streamlit_conversion/__init__.py +3 -0
  81. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +240 -0
  82. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +246 -0
  83. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +193 -0
  84. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +112 -0
  85. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +118 -0
  86. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +292 -0
  87. mito_ai/tests/test_constants.py +31 -3
  88. mito_ai/tests/test_telemetry.py +12 -0
  89. mito_ai/tests/user/__init__.py +2 -0
  90. mito_ai/tests/user/test_user.py +120 -0
  91. mito_ai/tests/utils/test_anthropic_utils.py +6 -6
  92. mito_ai/user/handlers.py +45 -0
  93. mito_ai/user/urls.py +21 -0
  94. mito_ai/utils/anthropic_utils.py +55 -121
  95. mito_ai/utils/create.py +17 -1
  96. mito_ai/utils/error_classes.py +42 -0
  97. mito_ai/utils/gemini_utils.py +39 -94
  98. mito_ai/utils/message_history_utils.py +7 -4
  99. mito_ai/utils/mito_server_utils.py +242 -0
  100. mito_ai/utils/open_ai_utils.py +38 -155
  101. mito_ai/utils/provider_utils.py +49 -0
  102. mito_ai/utils/server_limits.py +1 -1
  103. mito_ai/utils/telemetry_utils.py +137 -5
  104. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -100
  105. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/package.json +4 -2
  106. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +3 -1
  107. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +2 -2
  108. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js +15948 -8403
  109. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.8f1845da6bf2b128c049.js.map +1 -0
  110. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +198 -0
  111. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +1 -0
  112. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js +58 -33
  113. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.8b24b5b3b93f95205b56.js.map +1 -0
  114. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +10 -2
  115. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +1 -0
  116. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +533 -0
  117. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +1 -0
  118. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +6941 -0
  119. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +1 -0
  120. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +1021 -0
  121. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +1 -0
  122. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +59698 -0
  123. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +1 -0
  124. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +7440 -0
  125. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +1 -0
  126. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js → mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +2 -240
  127. mito_ai-0.1.49.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +1 -0
  128. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/METADATA +5 -2
  129. mito_ai-0.1.49.dist-info/RECORD +205 -0
  130. mito_ai/app_builder/handlers.py +0 -218
  131. mito_ai/tests/providers_test.py +0 -438
  132. mito_ai/tests/test_anthropic_client.py +0 -270
  133. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.281f4b9af60d620c6fb1.js.map +0 -1
  134. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.4f1d00fd0c58fcc05d8d.js.map +0 -1
  135. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/style_index_js.06083e515de4862df010.js.map +0 -1
  136. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js +0 -7842
  137. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_html2canvas_dist_html2canvas_js.ea47e8c8c906197f8d19.js.map +0 -1
  138. mito_ai-0.1.33.data/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.9795f79265ddb416864b.js.map +0 -1
  139. mito_ai-0.1.33.dist-info/RECORD +0 -134
  140. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  141. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  142. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  143. {mito_ai-0.1.33.data → mito_ai-0.1.49.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  144. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/WHEEL +0 -0
  145. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/entry_points.txt +0 -0
  146. {mito_ai-0.1.33.dist-info → mito_ai-0.1.49.dist-info}/licenses/LICENSE +0 -0
@@ -14,6 +14,7 @@ import tornado.web
14
14
  from jupyter_core.utils import ensure_async
15
15
  from jupyter_server.base.handlers import JupyterHandler
16
16
  from tornado.websocket import WebSocketHandler
17
+ from openai.types.chat import ChatCompletionMessageParam
17
18
  from mito_ai.completions.message_history import GlobalMessageHistory
18
19
  from mito_ai.logger import get_logger
19
20
  from mito_ai.completions.models import (
@@ -48,11 +49,8 @@ from mito_ai.utils.telemetry_utils import identify
48
49
 
49
50
  FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
50
51
 
51
- # The GlobalMessageHistory is responsible for updating the message histories stored in the .mito/ai-chats directory.
52
- # We create one GlobalMessageHistory per backend server instance instead of one per websocket connection so that the
53
- # there is one manager of the locks for the .mito/ai-chats directory. This is my current understanding and it
54
- # might be incorrect!
55
- message_history = GlobalMessageHistory()
52
+ # The GlobalMessageHistory is now created in __init__.py and passed to handlers
53
+ # to ensure there's only one instance managing the .mito/ai-chats directory locks
56
54
 
57
55
  # This handler is responsible for the mito_ai/completions endpoint.
58
56
  # It takes a message from the user, sends it to the OpenAI API, and returns the response.
@@ -61,12 +59,14 @@ message_history = GlobalMessageHistory()
61
59
  class CompletionHandler(JupyterHandler, WebSocketHandler):
62
60
  """Completion websocket handler."""
63
61
 
64
- def initialize(self, llm: OpenAIProvider) -> None:
62
+ def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
65
63
  super().initialize()
66
64
  self.log.debug("Initializing websocket connection %s", self.request.path)
67
65
  self._llm = llm
66
+ self._message_history = message_history
68
67
  self.is_pro = is_pro()
69
68
  self._selected_model = FALLBACK_MODEL
69
+ self.is_electron = False
70
70
  identify(llm.key_type)
71
71
 
72
72
  @property
@@ -128,6 +128,18 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
128
128
  parsed_message = json.loads(message)
129
129
  metadata_dict = parsed_message.get('metadata', {})
130
130
  type: MessageType = MessageType(parsed_message.get('type'))
131
+
132
+ # Extract environment information from the message
133
+ environment = parsed_message.get('environment', {})
134
+ if environment:
135
+ is_electron = environment.get('isElectron', None)
136
+ if is_electron is not None:
137
+ if is_electron != self.is_electron:
138
+ # If the is_electron status is different, log it
139
+ identify(key_type=self._llm.key_type, is_electron=is_electron)
140
+
141
+ self.is_electron = is_electron
142
+
131
143
  except ValueError as e:
132
144
  self.log.error("Invalid completion request.", exc_info=e)
133
145
  return
@@ -136,7 +148,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
136
148
 
137
149
  # Clear history if the type is "start_new_chat"
138
150
  if type == MessageType.START_NEW_CHAT:
139
- thread_id = message_history.create_new_thread()
151
+ thread_id = self._message_history.create_new_thread()
140
152
 
141
153
  reply = StartNewChatReply(
142
154
  parent_id=parsed_message.get("message_id"),
@@ -147,7 +159,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
147
159
 
148
160
  # Handle get_threads: return list of chat threads
149
161
  if type == MessageType.GET_THREADS:
150
- threads = message_history.get_threads()
162
+ threads = self._message_history.get_threads()
151
163
  reply = FetchThreadsReply(
152
164
  parent_id=parsed_message.get("message_id"),
153
165
  threads=threads
@@ -159,7 +171,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
159
171
  if type == MessageType.DELETE_THREAD:
160
172
  thread_id_to_delete = metadata_dict.get('thread_id')
161
173
  if thread_id_to_delete:
162
- is_thread_deleted = message_history.delete_thread(thread_id_to_delete)
174
+ is_thread_deleted = self._message_history.delete_thread(thread_id_to_delete)
163
175
  reply = DeleteThreadReply(
164
176
  parent_id=parsed_message.get("message_id"),
165
177
  success=is_thread_deleted
@@ -175,7 +187,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
175
187
 
176
188
  # If a thread_id is provided, use that thread's history; otherwise, use newest.
177
189
  thread_id = metadata_dict.get('thread_id')
178
- display_history = message_history.get_display_history(thread_id)
190
+ display_history = self._message_history.get_display_history(thread_id)
179
191
 
180
192
  reply = FetchHistoryReply(
181
193
  parent_id=parsed_message.get('message_id'),
@@ -209,7 +221,32 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
209
221
  )
210
222
  self.reply(reply)
211
223
  return
212
-
224
+
225
+ if type == MessageType.STOP_AGENT:
226
+ thread_id_to_stop = metadata_dict.get('threadId')
227
+ if thread_id_to_stop:
228
+ self.log.info(f"Stopping agent, thread ID: {thread_id_to_stop}")
229
+
230
+ ai_optimized_message: ChatCompletionMessageParam = {
231
+ "role": "assistant",
232
+ "content": "The user made the following request: Stop processing my last request. I want to change it. Please answer my future requests without going back and finising my previous request."
233
+ }
234
+ display_optimized_message: ChatCompletionMessageParam = {
235
+ "role": "assistant",
236
+ "content": "Agent interupted by user "
237
+ }
238
+
239
+ await self._message_history.append_message(
240
+ ai_optimized_message=ai_optimized_message,
241
+ display_message=display_optimized_message,
242
+ model=self._selected_model,
243
+ llm_provider=self._llm,
244
+ thread_id=thread_id_to_stop
245
+ )
246
+ else:
247
+ self.log.info("Trying to stop agent, but no thread ID available")
248
+ return
249
+
213
250
  try:
214
251
  # Get completion based on message type
215
252
  completion = None
@@ -227,7 +264,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
227
264
  await stream_chat_completion(
228
265
  chat_metadata,
229
266
  self._llm,
230
- message_history,
267
+ self._message_history,
231
268
  message_id,
232
269
  self.reply,
233
270
  model
@@ -235,7 +272,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
235
272
  return
236
273
  else:
237
274
  # Regular non-streaming completion
238
- completion = await get_chat_completion(chat_metadata, self._llm, message_history, model)
275
+ completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
239
276
  elif type == MessageType.SMART_DEBUG:
240
277
  smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
241
278
  # Handle streaming if requested and available
@@ -244,7 +281,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
244
281
  await stream_smart_debug_completion(
245
282
  smart_debug_metadata,
246
283
  self._llm,
247
- message_history,
284
+ self._message_history,
248
285
  message_id,
249
286
  self.reply,
250
287
  model
@@ -252,7 +289,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
252
289
  return
253
290
  else:
254
291
  # Regular non-streaming completion
255
- completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, message_history, model)
292
+ completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
256
293
  elif type == MessageType.CODE_EXPLAIN:
257
294
  code_explain_metadata = CodeExplainMetadata(**metadata_dict)
258
295
 
@@ -262,7 +299,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
262
299
  await stream_code_explain_completion(
263
300
  code_explain_metadata,
264
301
  self._llm,
265
- message_history,
302
+ self._message_history,
266
303
  message_id,
267
304
  self.reply,
268
305
  model
@@ -270,16 +307,16 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
270
307
  return
271
308
  else:
272
309
  # Regular non-streaming completion
273
- completion = await get_code_explain_completion(code_explain_metadata, self._llm, message_history, model)
310
+ completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
274
311
  elif type == MessageType.AGENT_EXECUTION:
275
312
  agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
276
- completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, message_history, model)
313
+ completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
277
314
  elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
278
315
  agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
279
- completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, message_history, model)
316
+ completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
280
317
  elif type == MessageType.INLINE_COMPLETION:
281
318
  inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
282
- completion = await get_inline_completion(inline_completer_metadata, self._llm, message_history, model)
319
+ completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
283
320
  else:
284
321
  raise ValueError(f"Invalid message type: {type}")
285
322
 
@@ -251,7 +251,15 @@ class GlobalMessageHistory:
251
251
  with self._lock:
252
252
  if thread_id not in self._chat_threads:
253
253
  return []
254
- return self._chat_threads[thread_id].display_history
254
+
255
+ thread = self._chat_threads[thread_id]
256
+ display_history = thread.display_history
257
+
258
+ # When we get a thread, update it's last interaction time so that if the
259
+ # user refreshes their browser, this chat will re-appear as the last opened chat.
260
+ self._update_last_interaction(thread)
261
+ self._save_thread_to_disk(thread)
262
+ return display_history
255
263
 
256
264
  async def append_message(
257
265
  self,
@@ -3,10 +3,10 @@
3
3
 
4
4
  import traceback
5
5
  from dataclasses import dataclass, field
6
- from typing import Annotated, List, Literal, Optional, Type, Union, NewType
6
+ from typing import List, Literal, Optional, NewType, Dict, Any
7
7
  from openai.types.chat import ChatCompletionMessageParam
8
8
  from enum import Enum
9
- from pydantic import BaseModel, Field
9
+ from pydantic import BaseModel
10
10
 
11
11
  # The ThreadID is the unique identifier for the chat thread.
12
12
  ThreadID = NewType('ThreadID', str)
@@ -20,6 +20,7 @@ class CellUpdate(BaseModel):
20
20
  index: Optional[int]
21
21
  id: Optional[str]
22
22
  code: str
23
+ code_summary: str
23
24
  cell_type: Optional[Literal['code', 'markdown']]
24
25
 
25
26
 
@@ -28,11 +29,13 @@ class CellUpdate(BaseModel):
28
29
  # for now and rely on the AI to respond with the correct types, following the format
29
30
  # that we show it in the system prompt.
30
31
  class AgentResponse(BaseModel):
31
- type: Literal['cell_update', 'get_cell_output', 'finished_task']
32
+ type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task', 'create_streamlit_app', 'edit_streamlit_app']
32
33
  message: str
33
34
  cell_update: Optional[CellUpdate]
34
35
  get_cell_output_cell_id: Optional[str]
35
36
  next_steps: Optional[List[str]]
37
+ analysis_assumptions: Optional[List[str]]
38
+ edit_streamlit_app_prompt: Optional[str]
36
39
 
37
40
 
38
41
  @dataclass(frozen=True)
@@ -61,6 +64,9 @@ class MessageType(Enum):
61
64
  GET_THREADS = "get_threads"
62
65
  DELETE_THREAD = "delete_thread"
63
66
  UPDATE_MODEL_CONFIG = "update_model_config"
67
+ STREAMLIT_CONVERSION = "streamlit_conversion"
68
+ STOP_AGENT = "stop_agent"
69
+ DEPLOY_APP = "deploy_app"
64
70
 
65
71
 
66
72
  @dataclass(frozen=True)
@@ -82,7 +88,7 @@ class ChatMessageMetadata():
82
88
  base64EncodedActiveCellOutput: Optional[str] = None
83
89
  index: Optional[int] = None
84
90
  stream: bool = False
85
- selectedRules: Optional[List[str]] = None
91
+ additionalContext: Optional[List[Dict[str, str]]] = None
86
92
 
87
93
 
88
94
  @dataclass(frozen=True)
@@ -91,12 +97,15 @@ class AgentExecutionMetadata():
91
97
  threadId: ThreadID
92
98
  input: str
93
99
  aiOptimizedCells: List[AIOptimizedCell]
100
+ activeCellId: str
94
101
  isChromeBrowser: bool
102
+ notebookPath: str
103
+ notebookID: str
95
104
  base64EncodedActiveCellOutput: Optional[str] = None
96
105
  variables: Optional[List[str]] = None
97
106
  files: Optional[List[str]] = None
98
107
  index: Optional[int] = None
99
- selectedRules: Optional[List[str]] = None
108
+ additionalContext: Optional[List[Dict[str, str]]] = None
100
109
 
101
110
  @dataclass(frozen=True)
102
111
  class AgentSmartDebugMetadata():
@@ -133,7 +142,7 @@ class InlineCompleterMetadata():
133
142
  suffix: str
134
143
  variables: Optional[List[str]] = None
135
144
  files: Optional[List[str]] = None
136
-
145
+
137
146
  @dataclass(frozen=True)
138
147
  class CompletionRequest:
139
148
  """
@@ -152,6 +161,9 @@ class CompletionRequest:
152
161
  # Whether to stream the response (if supported by the model).
153
162
  stream: bool = False
154
163
 
164
+ # Environment information from the client
165
+ environment: Optional[Dict[str, Any]] = None
166
+
155
167
 
156
168
  @dataclass(frozen=True)
157
169
  class AICapabilities:
@@ -225,6 +237,19 @@ class CompletionError:
225
237
  While mypy doesn't know about this attribute on BaseException, we need to handle it
226
238
  to properly extract error messages from OpenAI API responses.
227
239
  """
240
+ from mito_ai.utils.mito_server_utils import ProviderCompletionException
241
+
242
+
243
+ # Handle ProviderCompletionException specially
244
+ if isinstance(exception, ProviderCompletionException):
245
+ return CompletionError(
246
+ error_type="LLM Provider Error",
247
+ title=exception.user_friendly_title,
248
+ traceback=traceback.format_exc(),
249
+ hint=exception.user_friendly_hint
250
+ )
251
+
252
+ # Handle all other exceptions as before
228
253
  error_type = type(exception)
229
254
  error_module = getattr(error_type, "__module__", "")
230
255
 
@@ -249,7 +274,6 @@ class CompletionError:
249
274
  hint=hint,
250
275
  )
251
276
 
252
-
253
277
  @dataclass(frozen=True)
254
278
  class ErrorMessage(CompletionError):
255
279
  """
@@ -3,18 +3,29 @@
3
3
 
4
4
  from mito_ai.completions.models import AgentExecutionMetadata
5
5
  from mito_ai.completions.prompt_builders.prompt_constants import (
6
+ ACTIVE_CELL_ID_SECTION_HEADING,
6
7
  FILES_SECTION_HEADING,
7
8
  JUPYTER_NOTEBOOK_SECTION_HEADING,
9
+ STREAMLIT_APP_STATUS_SECTION_HEADING,
8
10
  VARIABLES_SECTION_HEADING,
9
11
  cell_update_output_str
10
12
  )
11
- from mito_ai.completions.prompt_builders.utils import get_rules_str
13
+ from mito_ai.completions.prompt_builders.utils import (
14
+ get_rules_str,
15
+ get_selected_context_str,
16
+ get_streamlit_app_status_str
17
+ )
18
+
12
19
 
13
20
  def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
14
21
  variables_str = '\n'.join([f"{variable}" for variable in md.variables or []])
15
22
  files_str = '\n'.join([f"{file}" for file in md.files or []])
16
23
  ai_optimized_cells_str = '\n'.join([f"{cell}" for cell in md.aiOptimizedCells or []])
17
- rules_str = get_rules_str(md.selectedRules)
24
+ rules_str = get_rules_str(md.additionalContext)
25
+ selected_context_str = get_selected_context_str(md.additionalContext)
26
+
27
+
28
+ streamlit_status_str = get_streamlit_app_status_str(md.notebookID, md.notebookPath)
18
29
 
19
30
  context_str = f"""Remember to choose the correct tool to respond with.
20
31
 
@@ -30,6 +41,14 @@ def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
30
41
  {FILES_SECTION_HEADING}
31
42
  {files_str}
32
43
 
44
+ {STREAMLIT_APP_STATUS_SECTION_HEADING}
45
+ {streamlit_status_str}
46
+
47
+ {ACTIVE_CELL_ID_SECTION_HEADING}
48
+ {md.activeCellId}
49
+
50
+ {selected_context_str}
51
+
33
52
  {cell_update_output_str(md.base64EncodedActiveCellOutput is not None)}"""
34
53
 
35
54
  task_str = '' if md.input == '' else f"""Your task:
@@ -54,6 +54,14 @@ ERROR CORRECTION:
54
54
  - Reuse as much of the existing code as possible.
55
55
  - DO NOT ADD TEMPORARY COMMENTS like '# Fixed the typo here' or '# Added this line to fix the error'
56
56
  - If you encounter a ModuleNotFoundError, you can install the package by adding the the following line to the top of the code cell: `!pip install <package_name> --quiet`.
57
+ - If you encounter a NameError, you can use the RUN_ALL_CELLS tool to run all cells from the top of the notebook to the bottom to bring the variable into scope.
58
+ RUN_ALL_CELLS:
59
+ When you want to execute all cells in the notebook from top to bottom, respond with this format:
60
+ {{
61
+ type: 'run_all_cells',
62
+ message: str
63
+ }}
64
+ Note that if the name error persists even after using run_all_cells, it means that the variable is not defined in the notebook and you should not reuse this tool. Additionally, this tool could also be used to refresh the notebook state.
57
65
 
58
66
  <Example>
59
67