mito-ai 0.1.45__py3-none-any.whl → 0.1.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mito-ai might be problematic.

Files changed (82)
  1. mito_ai/__init__.py +10 -1
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +90 -5
  4. mito_ai/app_deploy/handlers.py +97 -77
  5. mito_ai/app_deploy/models.py +16 -12
  6. mito_ai/chat_history/handlers.py +63 -0
  7. mito_ai/chat_history/urls.py +32 -0
  8. mito_ai/completions/handlers.py +18 -20
  9. mito_ai/completions/models.py +4 -1
  10. mito_ai/completions/prompt_builders/agent_execution_prompt.py +6 -1
  11. mito_ai/completions/prompt_builders/agent_system_message.py +63 -4
  12. mito_ai/completions/prompt_builders/chat_system_message.py +4 -0
  13. mito_ai/completions/prompt_builders/prompt_constants.py +1 -0
  14. mito_ai/completions/prompt_builders/utils.py +14 -0
  15. mito_ai/constants.py +3 -0
  16. mito_ai/path_utils.py +56 -0
  17. mito_ai/streamlit_conversion/agent_utils.py +27 -106
  18. mito_ai/streamlit_conversion/prompts/prompt_constants.py +166 -53
  19. mito_ai/streamlit_conversion/prompts/streamlit_app_creation_prompt.py +2 -1
  20. mito_ai/streamlit_conversion/prompts/streamlit_error_correction_prompt.py +3 -3
  21. mito_ai/streamlit_conversion/prompts/streamlit_finish_todo_prompt.py +4 -3
  22. mito_ai/streamlit_conversion/{streamlit_system_prompt.py → prompts/streamlit_system_prompt.py} +1 -0
  23. mito_ai/streamlit_conversion/prompts/update_existing_app_prompt.py +50 -0
  24. mito_ai/streamlit_conversion/search_replace_utils.py +93 -0
  25. mito_ai/streamlit_conversion/streamlit_agent_handler.py +103 -119
  26. mito_ai/streamlit_conversion/streamlit_utils.py +18 -68
  27. mito_ai/streamlit_conversion/validate_streamlit_app.py +78 -96
  28. mito_ai/streamlit_preview/handlers.py +44 -85
  29. mito_ai/streamlit_preview/manager.py +6 -6
  30. mito_ai/streamlit_preview/utils.py +19 -18
  31. mito_ai/tests/chat_history/test_chat_history.py +211 -0
  32. mito_ai/tests/message_history/test_message_history_utils.py +43 -19
  33. mito_ai/tests/providers/test_anthropic_client.py +178 -6
  34. mito_ai/tests/streamlit_conversion/test_apply_search_replace.py +226 -0
  35. mito_ai/tests/streamlit_conversion/test_streamlit_agent_handler.py +87 -114
  36. mito_ai/tests/streamlit_conversion/test_streamlit_utils.py +42 -45
  37. mito_ai/tests/streamlit_conversion/test_validate_streamlit_app.py +20 -14
  38. mito_ai/tests/streamlit_preview/test_streamlit_preview_handler.py +13 -16
  39. mito_ai/tests/streamlit_preview/test_streamlit_preview_manager.py +22 -26
  40. mito_ai/tests/user/__init__.py +2 -0
  41. mito_ai/tests/user/test_user.py +120 -0
  42. mito_ai/user/handlers.py +45 -0
  43. mito_ai/user/urls.py +21 -0
  44. mito_ai/utils/anthropic_utils.py +8 -6
  45. mito_ai/utils/create.py +17 -1
  46. mito_ai/utils/error_classes.py +42 -0
  47. mito_ai/utils/message_history_utils.py +7 -4
  48. mito_ai/utils/telemetry_utils.py +79 -11
  49. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +1 -1
  50. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  51. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  52. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js → mito_ai-0.1.47.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.2db61d2b629817845901.js +2126 -363
  53. mito_ai-0.1.47.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.2db61d2b629817845901.js.map +1 -0
  54. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js → mito_ai-0.1.47.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.e22c6cd4e56c32116daa.js +9 -9
  55. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.684f82575fcc2e3b350c.js.map → mito_ai-0.1.47.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.e22c6cd4e56c32116daa.js.map +1 -1
  56. {mito_ai-0.1.45.dist-info → mito_ai-0.1.47.dist-info}/METADATA +1 -1
  57. {mito_ai-0.1.45.dist-info → mito_ai-0.1.47.dist-info}/RECORD +81 -69
  58. mito_ai-0.1.45.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.0c3368195d954d2ed033.js.map +0 -1
  59. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  60. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  61. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  62. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  63. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  64. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js +0 -0
  65. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.5876024bb17dbd6a3ee6.js.map +0 -0
  66. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  67. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  68. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  69. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  70. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  71. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  72. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  73. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  74. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  75. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  76. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  77. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  78. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  79. {mito_ai-0.1.45.data → mito_ai-0.1.47.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  80. {mito_ai-0.1.45.dist-info → mito_ai-0.1.47.dist-info}/WHEEL +0 -0
  81. {mito_ai-0.1.45.dist-info → mito_ai-0.1.47.dist-info}/entry_points.txt +0 -0
  82. {mito_ai-0.1.45.dist-info → mito_ai-0.1.47.dist-info}/licenses/LICENSE +0 -0
mito_ai/completions/handlers.py CHANGED
@@ -49,11 +49,8 @@ from mito_ai.utils.telemetry_utils import identify
 
 FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
 
-# The GlobalMessageHistory is responsible for updating the message histories stored in the .mito/ai-chats directory.
-# We create one GlobalMessageHistory per backend server instance instead of one per websocket connection so that the
-# there is one manager of the locks for the .mito/ai-chats directory. This is my current understanding and it
-# might be incorrect!
-message_history = GlobalMessageHistory()
+# The GlobalMessageHistory is now created in __init__.py and passed to handlers
+# to ensure there's only one instance managing the .mito/ai-chats directory locks
 
 # This handler is responsible for the mito_ai/completions endpoint.
 # It takes a message from the user, sends it to the OpenAI API, and returns the response.
@@ -62,10 +59,11 @@ message_history = GlobalMessageHistory()
 class CompletionHandler(JupyterHandler, WebSocketHandler):
     """Completion websocket handler."""
 
-    def initialize(self, llm: OpenAIProvider) -> None:
+    def initialize(self, llm: OpenAIProvider, message_history: GlobalMessageHistory) -> None:
         super().initialize()
         self.log.debug("Initializing websocket connection %s", self.request.path)
         self._llm = llm
+        self._message_history = message_history
         self.is_pro = is_pro()
         self._selected_model = FALLBACK_MODEL
         self.is_electron = False
@@ -150,7 +148,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # Clear history if the type is "start_new_chat"
         if type == MessageType.START_NEW_CHAT:
-            thread_id = message_history.create_new_thread()
+            thread_id = self._message_history.create_new_thread()
 
             reply = StartNewChatReply(
                 parent_id=parsed_message.get("message_id"),
@@ -161,7 +159,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # Handle get_threads: return list of chat threads
         if type == MessageType.GET_THREADS:
-            threads = message_history.get_threads()
+            threads = self._message_history.get_threads()
             reply = FetchThreadsReply(
                 parent_id=parsed_message.get("message_id"),
                 threads=threads
@@ -173,7 +171,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
         if type == MessageType.DELETE_THREAD:
             thread_id_to_delete = metadata_dict.get('thread_id')
             if thread_id_to_delete:
-                is_thread_deleted = message_history.delete_thread(thread_id_to_delete)
+                is_thread_deleted = self._message_history.delete_thread(thread_id_to_delete)
                 reply = DeleteThreadReply(
                     parent_id=parsed_message.get("message_id"),
                     success=is_thread_deleted
@@ -189,7 +187,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
 
         # If a thread_id is provided, use that thread's history; otherwise, use newest.
         thread_id = metadata_dict.get('thread_id')
-        display_history = message_history.get_display_history(thread_id)
+        display_history = self._message_history.get_display_history(thread_id)
 
         reply = FetchHistoryReply(
             parent_id=parsed_message.get('message_id'),
@@ -238,7 +236,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 "content": "Agent interupted by user "
             }
 
-            await message_history.append_message(
+            await self._message_history.append_message(
                 ai_optimized_message=ai_optimized_message,
                 display_message=display_optimized_message,
                 model=self._selected_model,
@@ -266,7 +264,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 await stream_chat_completion(
                     chat_metadata,
                     self._llm,
-                    message_history,
+                    self._message_history,
                     message_id,
                     self.reply,
                     model
@@ -274,7 +272,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_chat_completion(chat_metadata, self._llm, message_history, model)
+                completion = await get_chat_completion(chat_metadata, self._llm, self._message_history, model)
         elif type == MessageType.SMART_DEBUG:
             smart_debug_metadata = SmartDebugMetadata(**metadata_dict)
             # Handle streaming if requested and available
@@ -283,7 +281,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 await stream_smart_debug_completion(
                     smart_debug_metadata,
                     self._llm,
-                    message_history,
+                    self._message_history,
                     message_id,
                     self.reply,
                     model
@@ -291,7 +289,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, message_history, model)
+                completion = await get_smart_debug_completion(smart_debug_metadata, self._llm, self._message_history, model)
         elif type == MessageType.CODE_EXPLAIN:
             code_explain_metadata = CodeExplainMetadata(**metadata_dict)
 
@@ -301,7 +299,7 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 await stream_code_explain_completion(
                     code_explain_metadata,
                     self._llm,
-                    message_history,
+                    self._message_history,
                     message_id,
                     self.reply,
                     model
@@ -309,16 +307,16 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
                 return
             else:
                 # Regular non-streaming completion
-                completion = await get_code_explain_completion(code_explain_metadata, self._llm, message_history, model)
+                completion = await get_code_explain_completion(code_explain_metadata, self._llm, self._message_history, model)
         elif type == MessageType.AGENT_EXECUTION:
             agent_execution_metadata = AgentExecutionMetadata(**metadata_dict)
-            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, message_history, model)
+            completion = await get_agent_execution_completion(agent_execution_metadata, self._llm, self._message_history, model)
         elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
             agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
-            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, message_history, model)
+            completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
         elif type == MessageType.INLINE_COMPLETION:
             inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
-            completion = await get_inline_completion(inline_completer_metadata, self._llm, message_history, model)
+            completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
         else:
             raise ValueError(f"Invalid message type: {type}")
 
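For reference, the shared GlobalMessageHistory is now wired into the handler at registration time. Below is a minimal sketch of how that wiring typically looks in a Jupyter server extension's __init__.py; the route and the OpenAIProvider construction are assumptions, not code from this package:

    # Sketch only: Tornado passes the dict registered with a handler as
    # keyword arguments to its initialize() method, so one
    # GlobalMessageHistory instance is shared by every websocket connection.
    from jupyter_server.utils import url_path_join

    def _load_jupyter_server_extension(server_app):
        web_app = server_app.web_app
        base_url = web_app.settings["base_url"]

        llm = OpenAIProvider()                    # assumed constructor
        message_history = GlobalMessageHistory()  # single shared instance

        web_app.add_handlers(".*$", [
            (
                url_path_join(base_url, "mito-ai", "completions"),  # assumed route
                CompletionHandler,
                {"llm": llm, "message_history": message_history},
            ),
        ])

Because the init-kwargs dict is built once, every connection's initialize() receives the same message_history object, which is what lets it act as the single lock manager for the .mito/ai-chats directory.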
mito_ai/completions/models.py CHANGED
@@ -29,12 +29,13 @@ class CellUpdate(BaseModel):
 # for now and rely on the AI to respond with the correct types, following the format
 # that we show it in the system prompt.
 class AgentResponse(BaseModel):
-    type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task']
+    type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task', 'create_streamlit_app', 'edit_streamlit_app']
     message: str
     cell_update: Optional[CellUpdate]
     get_cell_output_cell_id: Optional[str]
    next_steps: Optional[List[str]]
     analysis_assumptions: Optional[List[str]]
+    edit_streamlit_app_prompt: Optional[str]
 
 
 @dataclass(frozen=True)
@@ -65,6 +66,7 @@ class MessageType(Enum):
     UPDATE_MODEL_CONFIG = "update_model_config"
     STREAMLIT_CONVERSION = "streamlit_conversion"
     STOP_AGENT = "stop_agent"
+    DEPLOY_APP = "deploy_app"
 
 
 @dataclass(frozen=True)
@@ -101,6 +103,7 @@ class AgentExecutionMetadata():
     files: Optional[List[str]] = None
     index: Optional[int] = None
     additionalContext: Optional[List[Dict[str, str]]] = None
+    streamlitAppIsOpen: Optional[bool] = None
 
 @dataclass(frozen=True)
 class AgentSmartDebugMetadata():
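To see what the widened AgentResponse model accepts, here is a small validation sketch; the payload is hypothetical and assumes AgentResponse is importable as defined above:

    from mito_ai.completions.models import AgentResponse

    # Hypothetical 'edit_streamlit_app' tool response from the model.
    raw = {
        "type": "edit_streamlit_app",
        "message": "Updating the app per your request.",
        "cell_update": None,
        "get_cell_output_cell_id": None,
        "next_steps": None,
        "analysis_assumptions": None,
        "edit_streamlit_app_prompt": "Change the chart colors to blue",
    }
    response = AgentResponse(**raw)  # pydantic validates the Literal type
    assert response.edit_streamlit_app_prompt is not None

A 'create_streamlit_app' response would carry edit_streamlit_app_prompt=None, which the Optional annotation permits.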
mito_ai/completions/prompt_builders/agent_execution_prompt.py CHANGED
@@ -5,12 +5,14 @@ from mito_ai.completions.models import AgentExecutionMetadata
 from mito_ai.completions.prompt_builders.prompt_constants import (
     FILES_SECTION_HEADING,
     JUPYTER_NOTEBOOK_SECTION_HEADING,
+    STREAMLIT_APP_STATUS_SECTION_HEADING,
     VARIABLES_SECTION_HEADING,
     cell_update_output_str
 )
 from mito_ai.completions.prompt_builders.utils import (
     get_rules_str,
     get_selected_context_str,
+    get_streamlit_app_status_str,
 )
 
 
@@ -20,7 +22,7 @@ def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
     ai_optimized_cells_str = '\n'.join([f"{cell}" for cell in md.aiOptimizedCells or []])
     rules_str = get_rules_str(md.additionalContext)
     selected_context_str = get_selected_context_str(md.additionalContext)
-
+    streamlit_status_str = get_streamlit_app_status_str(md.streamlitAppIsOpen)
     context_str = f"""Remember to choose the correct tool to respond with.
 
 {rules_str}
@@ -35,6 +37,9 @@ def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
 {FILES_SECTION_HEADING}
 {files_str}
 
+{STREAMLIT_APP_STATUS_SECTION_HEADING}
+{streamlit_status_str}
+
 {selected_context_str}
 
 {cell_update_output_str(md.base64EncodedActiveCellOutput is not None)}"""
mito_ai/completions/prompt_builders/agent_system_message.py CHANGED
@@ -225,6 +225,57 @@ Important information:
 5. Do not use this tool repeatedly if it continues to produce errors - instead, focus on fixing the specific error that occurred.
 ====
 
+TOOL: CREATE_STREAMLIT_APP
+
+When you want to create a new Streamlit app from the current notebook, respond with this format:
+
+{{
+    type: 'create_streamlit_app',
+    message: str
+}}
+
+Important information:
+1. The message is a short summary of why you're creating the Streamlit app.
+2. Only use this tool when the user explicitly asks to create or preview a Streamlit app AND no Streamlit app is currently open.
+3. This tool creates a new app from scratch - use EDIT_STREAMLIT_APP tool if the user is asking you to edit, update, or modify an app that already exists.
+4. Using this tool will automatically open the app so the user can see a preview of the app.
+5. When you use this tool, assume that it successfully created the Streamlit app unless the user explicitly tells you otherwise. The app will remain open so that the user can view it until the user decides to close it. You do not need to continually use the create_streamlit_app tool to keep the app open.
+
+<Example>
+
+Your task: Show me my notebook as an app.
+
+Output:
+{{
+    type: 'create_streamlit_app',
+    message: "I'll convert your notebook into an app."
+}}
+
+The user will see a preview of the app and because you fulfilled your task, you can next respond with a FINISHED_TASK tool message.
+
+<Example>
+
+====
+
+TOOL: EDIT_STREAMLIT_APP
+
+When you want to edit an existing Streamlit app, respond with this format:
+
+{{
+    type: 'edit_streamlit_app',
+    message: str,
+    edit_streamlit_app_prompt: str
+}}
+
+Important information:
+1. The message is a short summary of why you're editing the Streamlit app.
+2. The edit_streamlit_app_prompt is REQUIRED and must contain specific instructions for the edit (e.g., "Make the title text larger", "Change the chart colors to blue", "Add a sidebar with filters").
+3. Only use this tool when the user asks to edit, update, or modify a Streamlit app.
+4. The app does not need to already be open for you to use the tool. Using this tool will automatically open the streamlit app after applying the changes so the user can view it. You do not need to call the create_streamlit_app tool first.
+5. When you use this tool, assume that it successfully edited the Streamlit app unless the user explicitly tells you otherwise. The app will remain open so that the user can view it until the user decides to close it.
+
+====
+
 TOOL: FINISHED_TASK
 
 When you have completed the user's task, respond with a message in this format:
@@ -238,8 +289,8 @@ When you have completed the user's task, respond with a message in this format:
 Important information:
 1. The message is a short summary of the ALL the work that you've completed on this task. It should not just refer to the final message. It could be something like "I've completed the sales strategy analysis by exploring key relationships in the data and summarizing creating a report with three recommendations to boost sales.""
 2. The message should include citations for any insights that you shared with the user.
-3. The next_steps is an optional list of 2 or 3 suggested follow-up tasks or analyses that the user might want to perform next. These should be concise, actionable suggestions that build on the work you've just completed. For example: ["Visualize the results", "Export the cleaned data to CSV", "Perform statistical analysis on the key metrics"].
-4. The next_steps should be as relevant to the user's actual task as possible. Try your best not to make generic suggestions like "Analyze the data" or "Visualize the results". For example, if the user just asked you to calculate LTV of their customers, you might suggest the following next steps: ["Graph key LTV drivers: churn and average transaction value", "Visualize LTV per customer age group"].
+3. The next_steps is an optional list of 2 or 3 suggested follow-up tasks or analyses that the user might want to perform next. These should be concise, actionable suggestions that build on the work you've just completed. For example: ["Export the cleaned data to CSV", "Analyze revenue per customer", "Convert notebook into an app"].
+4. The next_steps should be as relevant to the user's actual task as possible. Try your best not to make generic suggestions like "Analyze the data" or "Visualize the results". For example, if the user just asked you to calculate LTV of their customers, you might suggest the following next steps: ["Graph key LTV drivers: churn and average transaction value", "Visualize LTV per age group"].
 5. If you are not sure what the user might want to do next, err on the side of suggesting next steps instead of making an assumption and using more CELL_UPDATES.
 6. If the user's task doesn't involve creating or modifying a code cell, you should respond with a FINISHED_TASK response.
 7. If the user is just sending a friendly greeting (like "Hello", "Hi", "Hey", "How are you?", "What can you help me with?", etc.), you must respond with a FINISHED_TASK response message with a friendly message like this: "Hello! I'm Mito AI, your AI assistant for data analysis and Python programming in Jupyter notebooks. I can help you analyze datasets, create visualizations, clean data, and much more. What would you like to work on today?"
@@ -391,7 +442,9 @@ As you are guiding the user through the process of completing the task, send the
 
 The user is a beginning Python user, so you will need to be careful to send them only small steps to complete. Don't try to complete the task in a single response to the user. Instead, each message you send to the user should only contain a single, small step towards the end goal. When the user has completed the step, they will let you know that they are ready for the next step.
 
-You will keep working in the following iterative format until you have decided that you have finished the user's request. When you decide that you have finished the user's request, respond with a FINISHED_TASK tool message. Otherwise, if you have not finished the user's request, respond with a CELL_UPDATE {OR_GET_CELL_OUTPUT} tool message. When you respond with a CELL_UPDATE, the user will apply the CELL_UPDATE to the notebook and run the new code cell. The user will then send you a message with an updated version of the variables defined in the kernel, code in the notebook, and files in the current directory. In addition, the user will check if the code you provided produced an errored when executed. If it did produce an error, the user will share the error message with you.
+You will keep working in the following iterative format until you have decided that you have finished the user's request. When you decide that you have finished the user's request, respond with a FINISHED_TASK tool message. Otherwise, if you have not finished the user's request, respond with one of your other tools.
+
+When you respond with a CELL_UPDATE, the user will apply the CELL_UPDATE to the notebook and run the new code cell. The user will then send you a message with an updated version of the variables defined in the kernel, code in the notebook, and files in the current directory. In addition, the user will check if the code you provided produced an errored when executed. If it did produce an error, the user will share the error message with you.
 
 Whenever you get a message back from the user, you should:
 1. Ask yourself if the previous message you sent to the user was correct. You can answer this question by reviewing the updated code, variables, or output of the cell if you requested it.
@@ -410,4 +463,10 @@ REMEMBER, YOU ARE GOING TO COMPLETE THE USER'S TASK OVER THE COURSE OF THE ENTIR
 - If you are happy with the analysis, refer back to the original task provided by the user to decide your next steps. In this example, it is to graph the results, so you will send a CellAddition to construct the graph.
 - Wait for the user to send you back the updated variables and notebook state.
 {'' if not isChromeBrowser else '- Send a GET_CELL_OUTPUT tool message to get the output of the cell you just created and check if you can improve the graph to make it more readable, informative, or professional.'}
-- If after reviewing the updates you decide that you've completed the task, send a FINISHED_TASK tool message."""
+- If after reviewing the updates you decide that you've completed the task, send a FINISHED_TASK tool message.
+
+====
+
+OTHER USEFUL INFORMATION:
+1. When importing matplotlib, write the code `%matplotlib inline` to make sure the graphs render in Jupyter
+"""
mito_ai/completions/prompt_builders/chat_system_message.py CHANGED
@@ -21,6 +21,10 @@ There are three possible types of responses you might give:
 2. Explanation/Analysis: If the task does not require a code update, it might instead require you to provide an explanation of existing code or data, provide an analysis of the the data or chart.
 3. Friendly Response: If the user is just asking a question, saying hi, or you're just chatting, respond with a friendly response and do not return any code.
 
+Other useful information:
+1. The user has two types of modes that they can collaborate with you in: Chat Mode (this mode) and agent mode. Chat mode gives the user more control over the edits made to the notebook and only edits the active cell. Agent mode gives you more autonomy over completing the user's task across mulitple messages. In agent mode, you can edit or create new cells, see the entire notebook, automatically run the code you write, and more.
+2. If the user asks you to generate a dashboard, app, or streamlit app for them, you should tell them that they must use Agent mode to complete the task. You are not able to automatically switch the user to agent mode, but they can switch to it themselves by using the Chat/Agent mode toggle in the bottom left corner of the Ai taskpane.
+
 ====
 {CITATION_RULES}
 
mito_ai/completions/prompt_builders/prompt_constants.py CHANGED
@@ -19,6 +19,7 @@ ACTIVE_CELL_ID_SECTION_HEADING = "The ID of the active code cell:"
 ACTIVE_CELL_OUTPUT_SECTION_HEADING = "Output of the active code cell:"
 GET_CELL_OUTPUT_TOOL_RESPONSE_SECTION_HEADING = "Output of the code cell you just applied the CELL_UPDATE to:"
 JUPYTER_NOTEBOOK_SECTION_HEADING = "Jupyter Notebook:"
+STREAMLIT_APP_STATUS_SECTION_HEADING = "Streamlit App Status:"
 
 # Placeholder text used when trimming content from messages
 CONTENT_REMOVED_PLACEHOLDER = "Content removed to save space"
mito_ai/completions/prompt_builders/utils.py CHANGED
@@ -69,3 +69,17 @@ def get_selected_context_str(additional_context: Optional[List[Dict[str, str]]])
 
     # STEP 3: Combine into a single string
     return "\n\n".join(context_parts)
+
+
+def get_streamlit_app_status_str(streamlit_app_is_open: Optional[bool]) -> str:
+    """
+    Get the streamlit app status string.
+    """
+    if streamlit_app_is_open is None:
+        return ""
+
+    if streamlit_app_is_open:
+        return "A Streamlit app is currently open and running."
+    else:
+        return "No Streamlit app is currently open."
+
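The helper's three-way return value maps onto the new prompt section: None (e.g. a frontend that never sends streamlitAppIsOpen) yields an empty section, while True/False state the app status explicitly. A quick illustration, using the strings from the diff:

    assert get_streamlit_app_status_str(None) == ""
    assert get_streamlit_app_status_str(True) == "A Streamlit app is currently open and running."
    assert get_streamlit_app_status_str(False) == "No Streamlit app is currently open."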
mito_ai/constants.py CHANGED
@@ -58,3 +58,6 @@ COGNITO_CONFIG_DEV = {
 }
 
 ACTIVE_COGNITO_CONFIG = COGNITO_CONFIG_DEV # Change to COGNITO_CONFIG_DEV for dev
+
+
+MESSAGE_HISTORY_TRIM_THRESHOLD: int = 3
mito_ai/path_utils.py ADDED
@@ -0,0 +1,56 @@
+# Copyright (c) Saga Inc.
+# Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+from typing import NewType
+import os
+from mito_ai.utils.error_classes import StreamlitPreviewError
+
+# Type definitions for better type safety
+AbsoluteNotebookPath = NewType('AbsoluteNotebookPath', str)
+AbsoluteNotebookDirPath = NewType('AbsoluteNotebookDirPath', str)
+AbsoluteAppPath = NewType('AbsoluteAppPath', str)
+
+def get_absolute_notebook_path(notebook_path: str) -> AbsoluteNotebookPath:
+    """
+    Convert any notebook path to an absolute path.
+
+    Args:
+        notebook_path: Path to the notebook (can be relative or absolute)
+
+    Returns:
+        AbsoluteNotebookPath: The absolute path to the notebook
+
+    Raises:
+        ValueError: If the path is invalid or empty
+    """
+    if not notebook_path or not notebook_path.strip():
+        raise StreamlitPreviewError("Notebook path cannot be empty", 400)
+
+    absolute_path = os.path.abspath(notebook_path)
+    return AbsoluteNotebookPath(absolute_path)
+
+
+def get_absolute_notebook_dir_path(notebook_path: AbsoluteNotebookPath) -> AbsoluteNotebookDirPath:
+    """
+    Get the directory containing the notebook.
+
+    Args:
+        notebook_path: Absolute path to the notebook
+
+    Returns:
+        AbsoluteNotebookDirPath: The directory containing the notebook
+    """
+    return AbsoluteNotebookDirPath(os.path.dirname(notebook_path))
+
+def get_absolute_app_path(app_directory: AbsoluteNotebookDirPath) -> AbsoluteAppPath:
+    """
+    Check if the app.py file exists in the given directory.
+    """
+    return AbsoluteAppPath(os.path.join(app_directory, "app.py"))
+
+def does_app_path_exists(app_path: AbsoluteAppPath) -> bool:
+    """
+    Check if the app.py file exists in the given directory.
+    """
+    return os.path.exists(app_path)
+
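A short usage sketch chaining the new path helpers; the notebook path is hypothetical:

    from mito_ai.path_utils import (
        get_absolute_notebook_path,
        get_absolute_notebook_dir_path,
        get_absolute_app_path,
        does_app_path_exists,
    )

    # Resolve a notebook path, find its directory, and check whether a
    # generated app.py already sits next to the notebook.
    notebook = get_absolute_notebook_path("analysis/sales.ipynb")
    notebook_dir = get_absolute_notebook_dir_path(notebook)
    app_path = get_absolute_app_path(notebook_dir)

    if does_app_path_exists(app_path):
        print(f"An app already exists at {app_path}")
    else:
        print(f"A new app would be created at {app_path}")

The NewType wrappers exist only for the type checker; at runtime AbsoluteNotebookPath and friends are plain str, so they add no overhead.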
mito_ai/streamlit_conversion/agent_utils.py CHANGED
@@ -1,116 +1,37 @@
 # Copyright (c) Saga Inc.
 # Distributed under the terms of the GNU Affero General Public License v3.0 License.
 
-from typing import List
+from typing import List, Tuple
 import re
-from unidiff import PatchSet
+from anthropic.types import MessageParam
+from mito_ai.streamlit_conversion.prompts.streamlit_system_prompt import streamlit_system_prompt
+from mito_ai.utils.anthropic_utils import stream_anthropic_completion_from_mito_server
 from mito_ai.streamlit_conversion.prompts.prompt_constants import MITO_TODO_PLACEHOLDER
+from mito_ai.completions.models import MessageType
+
+STREAMLIT_AI_MODEL = "claude-sonnet-4-5-20250929"
 
 def extract_todo_placeholders(agent_response: str) -> List[str]:
     """Extract TODO placeholders from the agent's response"""
     return [line.strip() for line in agent_response.split('\n') if MITO_TODO_PLACEHOLDER in line]
 
-
-def apply_patch_to_text(text: str, diff: str) -> str:
-    """
-    Apply a *unified-diff* (git-style) patch to the given text and return
-    the updated contents.
-
-    Parameters
-    ----------
-    text : str
-        The original file contents.
-    diff : str
-        A unified diff that transforms *text* into the desired output.
-        The diff must reference exactly one file (the Streamlit app).
-
-    Returns
-    -------
-    str
-        The patched contents.
-
-    Raises
-    ------
-    ValueError
-        If the patch cannot be applied or references more than one file.
-    """
-    # Nothing to do
-    if not diff.strip():
-        return text
-
-    # Parse the patch
-    patch = PatchSet(diff.splitlines(keepends=True))
-
-    # We expect a single-file patch (what the prompt asks the model to emit)
-    if len(patch) != 1:
-        raise ValueError(
-            f"Expected a patch for exactly one file, got {len(patch)} files."
-        )
-
-    file_patch = patch[0]
-
-    original_lines = text.splitlines(keepends=True)
-    result_lines: List[str] = []
-
-    cursor = 0  # index in original_lines (0-based)
-
-    for hunk in file_patch:
-        # Copy unchanged lines before this hunk
-        while cursor < hunk.source_start - 1:
-            result_lines.append(original_lines[cursor])
-            cursor += 1
-
-        # Apply hunk line-by-line
-        for line in hunk:
-            if line.is_context:
-                result_lines.append(original_lines[cursor])
-                cursor += 1
-            elif line.is_removed:
-                cursor += 1  # Skip this line from the original
-            elif line.is_added:
-                # Ensure added line ends with newline for consistency
-                val = line.value
-                if not val.endswith("\n"):
-                    val += "\n"
-                result_lines.append(val)
-
-    # Copy any remaining lines after the last hunk
-    result_lines.extend(original_lines[cursor:])
-
-    return "".join(result_lines)
-
-
-def fix_diff_headers(diff: str) -> str:
-    """
-    The AI is generally not very good at counting the number of lines in the diff. If the hunk header has
-    an incorrect count, then the patch will fail. So instead we just calculate the counts ourselves, its deterministic.
-    """
-    lines = diff.split('\n')
-
-    for i, line in enumerate(lines):
-        if line.startswith('@@'):
-            # Extract the starting line numbers
-            match = re.match(r'@@ -(\d+),\d+ \+(\d+),\d+ @@', line)
-            if match:
-                old_start = match.group(1)
-                new_start = match.group(2)
-
-                # Count lines in this hunk
-                old_count = 0
-                new_count = 0
-
-                for j in range(i + 1, len(lines)):
-                    next_line = lines[j]
-                    if next_line.startswith('@@') or next_line.startswith('---'):
-                        break
-                    if next_line.startswith(' ') or next_line.startswith('-'):
-                        old_count += 1
-                    if next_line.startswith(' ') or next_line.startswith('+'):
-                        new_count += 1
-
-                # Replace the header with correct counts
-                lines[i] = f"@@ -{old_start},{old_count} +{new_start},{new_count} @@"
-
-    return '\n'.join(lines)
-
-
+async def get_response_from_agent(message_to_agent: List[MessageParam]) -> str:
+    """Gets the streaming response from the agent using the mito server"""
+    model = STREAMLIT_AI_MODEL
+    max_tokens = 64000 # TODO: If we move to haiku, we must reset this to 8192
+    temperature = 0.2
+
+    accumulated_response = ""
+    async for stream_chunk in stream_anthropic_completion_from_mito_server(
+        model = model,
+        max_tokens = max_tokens,
+        temperature = temperature,
+        system = streamlit_system_prompt,
+        messages = message_to_agent,
+        stream=True,
+        message_type=MessageType.STREAMLIT_CONVERSION,
+        reply_fn=None,
+        message_id=""
+    ):
+        accumulated_response += stream_chunk
+    return accumulated_response
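A minimal sketch of calling the new helper; the message content is hypothetical, and MessageParam is the anthropic SDK's typed dict for chat messages:

    import asyncio
    from anthropic.types import MessageParam
    from mito_ai.streamlit_conversion.agent_utils import get_response_from_agent

    async def main() -> None:
        # Real callers build this from the notebook contents and the
        # prompts in streamlit_conversion/prompts/.
        messages: list[MessageParam] = [
            {"role": "user", "content": "Convert my notebook into a Streamlit app."}
        ]
        app_code = await get_response_from_agent(messages)
        print(app_code)

    asyncio.run(main())

Together with the new search_replace_utils.py in the file list, this suggests app edits now arrive as search/replace blocks rather than unified diffs; the removed fix_diff_headers existed precisely because the model miscounted hunk line numbers.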