mito-ai 0.1.55__py3-none-any.whl → 0.1.57__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. mito_ai/__init__.py +2 -0
  2. mito_ai/_version.py +1 -1
  3. mito_ai/anthropic_client.py +7 -6
  4. mito_ai/chart_wizard/__init__.py +3 -0
  5. mito_ai/chart_wizard/handlers.py +52 -0
  6. mito_ai/chart_wizard/urls.py +23 -0
  7. mito_ai/completions/completion_handlers/completion_handler.py +11 -2
  8. mito_ai/completions/completion_handlers/scratchpad_result_handler.py +66 -0
  9. mito_ai/completions/handlers.py +5 -0
  10. mito_ai/completions/models.py +24 -3
  11. mito_ai/completions/prompt_builders/agent_execution_prompt.py +18 -50
  12. mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py +82 -95
  13. mito_ai/completions/prompt_builders/agent_system_message.py +304 -276
  14. mito_ai/completions/prompt_builders/chart_conversion_prompt.py +27 -0
  15. mito_ai/completions/prompt_builders/chat_prompt.py +15 -100
  16. mito_ai/completions/prompt_builders/chat_system_message.py +98 -72
  17. mito_ai/completions/prompt_builders/explain_code_prompt.py +22 -24
  18. mito_ai/completions/prompt_builders/inline_completer_prompt.py +78 -107
  19. mito_ai/completions/prompt_builders/prompt_constants.py +35 -45
  20. mito_ai/completions/prompt_builders/prompt_section_registry/__init__.py +70 -0
  21. mito_ai/completions/prompt_builders/prompt_section_registry/active_cell_code.py +15 -0
  22. mito_ai/completions/prompt_builders/prompt_section_registry/active_cell_id.py +10 -0
  23. mito_ai/completions/prompt_builders/prompt_section_registry/active_cell_output.py +20 -0
  24. mito_ai/completions/prompt_builders/prompt_section_registry/base.py +37 -0
  25. mito_ai/completions/prompt_builders/prompt_section_registry/error_traceback.py +17 -0
  26. mito_ai/completions/prompt_builders/prompt_section_registry/example.py +19 -0
  27. mito_ai/completions/prompt_builders/prompt_section_registry/files.py +17 -0
  28. mito_ai/completions/prompt_builders/prompt_section_registry/generic.py +15 -0
  29. mito_ai/completions/prompt_builders/prompt_section_registry/get_cell_output_tool_response.py +21 -0
  30. mito_ai/completions/prompt_builders/prompt_section_registry/notebook.py +19 -0
  31. mito_ai/completions/prompt_builders/prompt_section_registry/rules.py +39 -0
  32. mito_ai/completions/prompt_builders/{utils.py → prompt_section_registry/selected_context.py} +51 -42
  33. mito_ai/completions/prompt_builders/prompt_section_registry/streamlit_app_status.py +25 -0
  34. mito_ai/completions/prompt_builders/prompt_section_registry/task.py +12 -0
  35. mito_ai/completions/prompt_builders/prompt_section_registry/variables.py +18 -0
  36. mito_ai/completions/prompt_builders/scratchpad_result_prompt.py +17 -0
  37. mito_ai/completions/prompt_builders/smart_debug_prompt.py +48 -63
  38. mito_ai/constants.py +0 -3
  39. mito_ai/tests/completions/test_prompt_section_registry.py +44 -0
  40. mito_ai/tests/message_history/test_message_history_utils.py +273 -340
  41. mito_ai/tests/providers/test_anthropic_client.py +7 -3
  42. mito_ai/utils/message_history_utils.py +68 -44
  43. mito_ai/utils/provider_utils.py +8 -1
  44. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/build_log.json +102 -102
  45. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/package.json +2 -2
  46. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/package.json.orig +1 -1
  47. mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.49c79c62671528877c61.js → mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js +2778 -297
  48. mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.9d26322f3e78beb2b666.js.map +1 -0
  49. mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9dfbffc3592eb6f0aef9.js → mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js +17 -17
  50. mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.9dfbffc3592eb6f0aef9.js.map → mito_ai-0.1.57.data/data/share/jupyter/labextensions/mito_ai/static/remoteEntry.79c1ea8a3cda73a4cb6f.js.map +1 -1
  51. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.css +7 -2
  52. {mito_ai-0.1.55.dist-info → mito_ai-0.1.57.dist-info}/METADATA +5 -1
  53. {mito_ai-0.1.55.dist-info → mito_ai-0.1.57.dist-info}/RECORD +78 -56
  54. mito_ai-0.1.55.data/data/share/jupyter/labextensions/mito_ai/static/lib_index_js.49c79c62671528877c61.js.map +0 -1
  55. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/etc/jupyter/jupyter_server_config.d/mito_ai.json +0 -0
  56. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/schemas/mito_ai/toolbar-buttons.json +0 -0
  57. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js +0 -0
  58. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/node_modules_process_browser_js.4b128e94d31a81ebd209.js.map +0 -0
  59. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/style.js +0 -0
  60. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js +0 -0
  61. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/style_index_js.f5d476ac514294615881.js.map +0 -0
  62. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js +0 -0
  63. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_apis_signOut_mjs-node_module-75790d.688c25857e7b81b1740f.js.map +0 -0
  64. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js +0 -0
  65. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_auth_dist_esm_providers_cognito_tokenProvider_tokenProvider_-72f1c8.a917210f057fcfe224ad.js.map +0 -0
  66. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js +0 -0
  67. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_dist_esm_index_mjs.6bac1a8c4cc93f15f6b7.js.map +0 -0
  68. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js +0 -0
  69. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_aws-amplify_ui-react_dist_esm_index_mjs.4fcecd65bef9e9847609.js.map +0 -0
  70. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js +0 -0
  71. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_react-dom_client_js-node_modules_aws-amplify_ui-react_dist_styles_css.b43d4249e4d3dac9ad7b.js.map +0 -0
  72. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js +0 -0
  73. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_semver_index_js.3f6754ac5116d47de76b.js.map +0 -0
  74. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js +0 -0
  75. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/static/vendors-node_modules_vscode-diff_dist_index_js.ea55f1f9346638aafbcf.js.map +0 -0
  76. {mito_ai-0.1.55.data → mito_ai-0.1.57.data}/data/share/jupyter/labextensions/mito_ai/themes/mito_ai/index.js +0 -0
  77. {mito_ai-0.1.55.dist-info → mito_ai-0.1.57.dist-info}/WHEEL +0 -0
  78. {mito_ai-0.1.55.dist-info → mito_ai-0.1.57.dist-info}/entry_points.txt +0 -0
  79. {mito_ai-0.1.55.dist-info → mito_ai-0.1.57.dist-info}/licenses/LICENSE +0 -0
mito_ai/__init__.py CHANGED
@@ -19,6 +19,7 @@ from mito_ai.app_manager.handlers import AppManagerHandler
  from mito_ai.file_uploads.urls import get_file_uploads_urls
  from mito_ai.user.urls import get_user_urls
  from mito_ai.chat_history.urls import get_chat_history_urls
+ from mito_ai.chart_wizard.urls import get_chart_wizard_urls

  # Force Matplotlib to use the Jupyter inline backend.
  # Background: importing Streamlit sets os.environ["MPLBACKEND"] = "Agg" very early.
@@ -109,6 +110,7 @@ def _load_jupyter_server_extension(server_app) -> None: # type: ignore
  handlers.extend(get_file_uploads_urls(base_url)) # type: ignore
  handlers.extend(get_user_urls(base_url)) # type: ignore
  handlers.extend(get_chat_history_urls(base_url, global_message_history)) # type: ignore
+ handlers.extend(get_chart_wizard_urls(base_url, open_ai_provider)) # type: ignore

  web_app.add_handlers(host_pattern, handlers)
  server_app.log.info("Loaded the mito_ai server extension")
mito_ai/_version.py CHANGED
@@ -1,4 +1,4 @@
  # This file is auto-generated by Hatchling. As such, do not:
  # - modify
  # - track in version control e.g. be sure to add to .gitignore
- __version__ = VERSION = '0.1.55'
+ __version__ = VERSION = '0.1.57'
mito_ai/anthropic_client.py CHANGED
@@ -7,7 +7,7 @@ from typing import Dict, Any, Optional, Tuple, Union, Callable, List, cast

  from anthropic.types import Message, MessageParam, TextBlockParam
  from mito_ai.completions.models import ResponseFormatInfo, CompletionReply, CompletionStreamChunk, CompletionItem, MessageType
- from mito_ai.constants import MESSAGE_HISTORY_TRIM_THRESHOLD
+ from mito_ai.completions.prompt_builders.prompt_section_registry import get_max_trim_after_messages
  from openai.types.chat import ChatCompletionMessageParam
  from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_server, select_correct_model, stream_anthropic_completion_from_mito_server, get_anthropic_completion_function_params

@@ -15,6 +15,10 @@ from mito_ai.utils.anthropic_utils import get_anthropic_completion_from_mito_ser
  # We set it to a high number so that we can edit large code cells
  MAX_TOKENS = 64_000

+ # Calculate the max trim threshold once at module load time.
+ # This is used for cache boundary calculation - messages older than this threshold are stable.
+ MAX_TRIM_THRESHOLD = get_max_trim_after_messages()
+
  def extract_and_parse_anthropic_json_response(response: Message) -> Union[object, Any]:
  """
  Extracts and parses the JSON response from the Claude API.
@@ -170,9 +174,6 @@ get_anthropic_system_prompt_and_messages_with_caching(messages: List[ChatCom
  1. System prompt (static) → Always cached
  2. Stable conversation history → Cache at keep_recent boundary
  3. Recent messages → Never cached (always fresh)
-
- The keep_recent parameter determines which messages are stable and won't be trimmed.
- We cache at the keep_recent boundary because those messages are guaranteed to be stable.
  """

  # Get the base system prompt and messages
@@ -189,14 +190,14 @@
  }]

  # 2. Cache conversation history at the boundary where the messages are stable.
- # Messages are stable after they are more than MESSAGE_HISTORY_TRIM_THRESHOLD old.
+ # Messages are stable after they are older than the max trim_after_messages threshold.
  # At this point, the messages are not edited anymore, so they will not invalidate the cache.
  # If we included the messages before the boundary in the cache, then every time we send a new
  # message, we would invalidate the cache and we would never get a cache hit except for the system prompt.
  messages_with_cache = []

  if len(anthropic_messages) > 0:
- cache_boundary = len(anthropic_messages) - MESSAGE_HISTORY_TRIM_THRESHOLD - 1
+ cache_boundary = len(anthropic_messages) - MAX_TRIM_THRESHOLD - 1

  # Add all messages, but only add cache_control to the message at the boundary
  for i, msg in enumerate(anthropic_messages):
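A minimal sketch of the cache-boundary arithmetic this hunk switches to MAX_TRIM_THRESHOLD, assuming the threshold is an integer count of recent, still-mutable messages (the role the removed MESSAGE_HISTORY_TRIM_THRESHOLD played). The helper name is hypothetical and the Anthropic cache_control schema is intentionally omitted:

def cache_boundary_index(num_messages: int, max_trim_threshold: int) -> int:
    # Newest message that is guaranteed stable: everything at or before this
    # index can carry the cache marker; everything after it stays uncached.
    return num_messages - max_trim_threshold - 1

assert cache_boundary_index(10, 3) == 6   # messages 0..6 cached, 7..9 always fresh
assert cache_boundary_index(2, 3) == -2   # negative: no message gets a cache marker

With the old constant removed (see the mito_ai/constants.py entry in the file list), the threshold is now derived from the prompt section registry instead of a hard-coded value.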
mito_ai/chart_wizard/__init__.py CHANGED
@@ -0,0 +1,3 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
mito_ai/chart_wizard/handlers.py CHANGED
@@ -0,0 +1,52 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ import json
+ import tornado
+ from typing import List
+ from jupyter_server.base.handlers import APIHandler
+ from openai.types.chat import ChatCompletionMessageParam
+ from mito_ai.completions.providers import OpenAIProvider
+ from mito_ai.utils.anthropic_utils import FAST_ANTHROPIC_MODEL
+ from mito_ai.completions.models import MessageType
+ from mito_ai.completions.prompt_builders.chart_conversion_prompt import create_chart_conversion_prompt
+
+ class ChartWizardHandler(APIHandler):
+ def initialize(self, llm: OpenAIProvider) -> None:
+ """Initialize the handler with the LLM provider."""
+ super().initialize()
+ self._llm = llm
+
+ @tornado.web.authenticated
+ async def post(self) -> None:
+ """POST endpoint that receives code from the frontend and sends it to LLM."""
+ try:
+ data = json.loads(self.request.body.decode('utf-8'))
+ code = data.get('code', '')
+
+ # Create prompt using the prompt builder
+ prompt = create_chart_conversion_prompt(code)
+
+ # Call LLM
+ messages: List[ChatCompletionMessageParam] = [{"role": "user", "content": prompt}]
+ converted_code = await self._llm.request_completions(
+ messages=messages,
+ model=FAST_ANTHROPIC_MODEL,
+ message_type=MessageType.CHAT,
+ thread_id=None
+ )
+
+ # Return the converted code
+ self.write({
+ "message": "Code converted successfully",
+ "converted_code": converted_code
+ })
+ self.finish()
+ except json.JSONDecodeError:
+ self.set_status(400)
+ self.write({"error": "Invalid JSON in request body"})
+ self.finish()
+ except Exception as e:
+ self.set_status(500)
+ self.write({"error": str(e)})
+ self.finish()
mito_ai/chart_wizard/urls.py CHANGED
@@ -0,0 +1,23 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from typing import List, Tuple, Any
+ from jupyter_server.utils import url_path_join
+ from mito_ai.chart_wizard.handlers import ChartWizardHandler
+ from mito_ai.completions.providers import OpenAIProvider
+
+
+ def get_chart_wizard_urls(base_url: str, llm: OpenAIProvider) -> List[Tuple[str, Any, dict]]:
+ """Get all chart wizard related URL patterns.
+
+ Args:
+ base_url: The base URL for the Jupyter server
+ llm: The OpenAI provider instance
+
+ Returns:
+ List of (url_pattern, handler_class, handler_kwargs) tuples
+ """
+ BASE_URL = base_url + "/mito-ai"
+ return [
+ (url_path_join(BASE_URL, "chart-wizard"), ChartWizardHandler, {"llm": llm}),
+ ]
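Together, the two new chart_wizard modules register a POST endpoint at <base_url>/mito-ai/chart-wizard that accepts a JSON body with a "code" field and returns "message" and "converted_code". A hedged sketch of calling it from outside JupyterLab, assuming a local Jupyter server with token auth; the server URL, token, and sample code are placeholders:

import json
import urllib.request

SERVER = "http://localhost:8888"   # placeholder: local Jupyter server
TOKEN = "<your-jupyter-token>"     # placeholder: server token

# Request/response shape mirrors ChartWizardHandler.post in the hunk above.
payload = json.dumps({"code": "df.plot(x='date', y='amount')"}).encode("utf-8")
request = urllib.request.Request(
    f"{SERVER}/mito-ai/chart-wizard",
    data=payload,
    headers={"Content-Type": "application/json", "Authorization": f"token {TOKEN}"},
    method="POST",
)
with urllib.request.urlopen(request) as response:
    body = json.loads(response.read().decode("utf-8"))
    print(body["converted_code"])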
mito_ai/completions/completion_handlers/completion_handler.py CHANGED
@@ -3,11 +3,20 @@

  from typing import Protocol, TypeVar
  from abc import abstractmethod, ABCMeta
- from mito_ai.completions.models import ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
+ from mito_ai.completions.models import ChatMessageMetadata, ScratchpadResultMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, InlineCompleterMetadata, AgentSmartDebugMetadata
  from mito_ai.completions.providers import OpenAIProvider
  from mito_ai.completions.message_history import GlobalMessageHistory

- T = TypeVar('T', ChatMessageMetadata, SmartDebugMetadata, CodeExplainMetadata, AgentExecutionMetadata, AgentSmartDebugMetadata, InlineCompleterMetadata, contravariant=True)
+ T = TypeVar('T',
+ ChatMessageMetadata,
+ SmartDebugMetadata,
+ CodeExplainMetadata,
+ AgentExecutionMetadata,
+ AgentSmartDebugMetadata,
+ InlineCompleterMetadata,
+ ScratchpadResultMetadata,
+ contravariant=True
+ )

  class CompletionHandler(Protocol[T], metaclass=ABCMeta):
  """Protocol defining the interface for completion handlers.
mito_ai/completions/completion_handlers/scratchpad_result_handler.py CHANGED
@@ -0,0 +1,66 @@
+ # Copyright (c) Saga Inc.
+ # Distributed under the terms of the GNU Affero General Public License v3.0 License.
+
+ from typing import List, Literal, Union
+ from openai.types.chat import ChatCompletionMessageParam
+ from mito_ai.completions.models import ScratchpadResultMetadata, MessageType, ResponseFormatInfo, AgentResponse
+ from mito_ai.completions.prompt_builders.scratchpad_result_prompt import create_scratchpad_result_prompt
+ from mito_ai.completions.providers import OpenAIProvider
+ from mito_ai.completions.message_history import GlobalMessageHistory
+ from mito_ai.completions.completion_handlers.completion_handler import CompletionHandler
+ from mito_ai.completions.completion_handlers.utils import append_agent_system_message, create_ai_optimized_message
+
+ __all__ = ["get_scratchpad_result_completion"]
+
+ class ScratchpadResultHandler(CompletionHandler[ScratchpadResultMetadata]):
+ """Handler for scratchpad result completions."""
+
+ @staticmethod
+ async def get_completion(
+ metadata: ScratchpadResultMetadata,
+ provider: OpenAIProvider,
+ message_history: GlobalMessageHistory,
+ model: str
+ ) -> str:
+ """Get a scratchpad result completion from the AI provider."""
+
+ if metadata.index is not None:
+ message_history.truncate_histories(
+ thread_id=metadata.threadId,
+ index=metadata.index
+ )
+
+ # Add the system message if it doesn't already exist
+ await append_agent_system_message(message_history, model, provider, metadata.threadId, True)
+
+ # Create the prompt
+ prompt = create_scratchpad_result_prompt(metadata)
+ display_prompt = ""
+
+ # Add the prompt to the message history
+ new_ai_optimized_message = create_ai_optimized_message(prompt, None, None)
+ new_display_optimized_message: ChatCompletionMessageParam = {"role": "user", "content": display_prompt}
+
+ await message_history.append_message(new_ai_optimized_message, new_display_optimized_message, model, provider, metadata.threadId)
+
+ # Get the completion
+ completion = await provider.request_completions(
+ messages=message_history.get_ai_optimized_history(metadata.threadId),
+ model=model,
+ response_format_info=ResponseFormatInfo(
+ name='agent_response',
+ format=AgentResponse
+ ),
+ message_type=MessageType.AGENT_SCRATCHPAD_RESULT,
+ user_input="",
+ thread_id=metadata.threadId
+ )
+
+ ai_response_message: ChatCompletionMessageParam = {"role": "assistant", "content": completion}
+
+ await message_history.append_message(ai_response_message, ai_response_message, model, provider, metadata.threadId)
+
+ return completion
+
+ # Use the static method directly
+ get_scratchpad_result_completion = ScratchpadResultHandler.get_completion
mito_ai/completions/handlers.py CHANGED
@@ -34,6 +34,7 @@ from mito_ai.completions.models import (
  CodeExplainMetadata,
  AgentExecutionMetadata,
  InlineCompleterMetadata,
+ ScratchpadResultMetadata,
  MessageType
  )
  from mito_ai.completions.providers import OpenAIProvider
@@ -45,6 +46,7 @@ from mito_ai.completions.completion_handlers.code_explain_handler import get_cod
  from mito_ai.completions.completion_handlers.inline_completer_handler import get_inline_completion
  from mito_ai.completions.completion_handlers.agent_execution_handler import get_agent_execution_completion
  from mito_ai.completions.completion_handlers.agent_auto_error_fixup_handler import get_agent_auto_error_fixup_completion
+ from mito_ai.completions.completion_handlers.scratchpad_result_handler import get_scratchpad_result_completion
  from mito_ai.utils.telemetry_utils import identify

  FALLBACK_MODEL = "gpt-4.1" # Default model to use for safety
@@ -314,6 +316,9 @@ class CompletionHandler(JupyterHandler, WebSocketHandler):
  elif type == MessageType.AGENT_AUTO_ERROR_FIXUP:
  agent_auto_error_fixup_metadata = AgentSmartDebugMetadata(**metadata_dict)
  completion = await get_agent_auto_error_fixup_completion(agent_auto_error_fixup_metadata, self._llm, self._message_history, model)
+ elif type == MessageType.AGENT_SCRATCHPAD_RESULT:
+ scratchpad_result_metadata = ScratchpadResultMetadata(**metadata_dict)
+ completion = await get_scratchpad_result_completion(scratchpad_result_metadata, self._llm, self._message_history, model)
  elif type == MessageType.INLINE_COMPLETION:
  inline_completer_metadata = InlineCompleterMetadata(**metadata_dict)
  completion = await get_inline_completion(inline_completer_metadata, self._llm, self._message_history, model)
mito_ai/completions/models.py CHANGED
@@ -17,7 +17,7 @@ ThreadID = NewType('ThreadID', str)

  class CellUpdate(BaseModel):
  type: Literal['modification', 'new']
- index: Optional[int]
+ after_cell_id: Optional[str]
  id: Optional[str]
  code: str
  code_summary: str
@@ -29,13 +29,26 @@
  # for now and rely on the AI to respond with the correct types, following the format
  # that we show it in the system prompt.
  class AgentResponse(BaseModel):
- type: Literal['cell_update', 'get_cell_output', 'run_all_cells', 'finished_task', 'create_streamlit_app', 'edit_streamlit_app']
+ type: Literal[
+ 'cell_update',
+ 'get_cell_output',
+ 'run_all_cells',
+ 'finished_task',
+ 'create_streamlit_app',
+ 'edit_streamlit_app',
+ 'ask_user_question',
+ 'scratchpad',
+ ]
  message: str
  cell_update: Optional[CellUpdate]
  get_cell_output_cell_id: Optional[str]
  next_steps: Optional[List[str]]
  analysis_assumptions: Optional[List[str]]
  streamlit_app_prompt: Optional[str]
+ question: Optional[str]
+ answers: Optional[List[str]]
+ scratchpad_code: Optional[str]
+ scratchpad_summary: Optional[str]


  @dataclass(frozen=True)
@@ -67,6 +80,7 @@ class MessageType(Enum):
  STREAMLIT_CONVERSION = "streamlit_conversion"
  STOP_AGENT = "stop_agent"
  DEPLOY_APP = "deploy_app"
+ AGENT_SCRATCHPAD_RESULT = "agent:scratchpad-result"


  @dataclass(frozen=True)
@@ -136,13 +150,20 @@ class CodeExplainMetadata():
  activeCellCode: Optional[str] = None

  @dataclass(frozen=True)
- class InlineCompleterMetadata():
+ class InlineCompleterMetadata():
  promptType: Literal['inline_completion']
  prefix: str
  suffix: str
  variables: Optional[List[str]] = None
  files: Optional[List[str]] = None

+ @dataclass(frozen=True)
+ class ScratchpadResultMetadata():
+ promptType: Literal['agent:scratchpad-result']
+ threadId: ThreadID
+ scratchpadResult: str
+ index: Optional[int] = None
+
  @dataclass(frozen=True)
  class CompletionRequest:
  """
mito_ai/completions/prompt_builders/agent_execution_prompt.py CHANGED
@@ -1,57 +1,25 @@
  # Copyright (c) Saga Inc.
  # Distributed under the terms of the GNU Affero General Public License v3.0 License.

+ from typing import List
  from mito_ai.completions.models import AgentExecutionMetadata
- from mito_ai.completions.prompt_builders.prompt_constants import (
- ACTIVE_CELL_ID_SECTION_HEADING,
- FILES_SECTION_HEADING,
- JUPYTER_NOTEBOOK_SECTION_HEADING,
- STREAMLIT_APP_STATUS_SECTION_HEADING,
- VARIABLES_SECTION_HEADING,
- cell_update_output_str
- )
- from mito_ai.completions.prompt_builders.utils import (
- get_rules_str,
- get_selected_context_str,
- get_streamlit_app_status_str
- )
+ from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
+ from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection


  def create_agent_execution_prompt(md: AgentExecutionMetadata) -> str:
- variables_str = '\n'.join([f"{variable}" for variable in md.variables or []])
- files_str = '\n'.join([f"{file}" for file in md.files or []])
- ai_optimized_cells_str = '\n'.join([f"{cell}" for cell in md.aiOptimizedCells or []])
- rules_str = get_rules_str(md.additionalContext)
- selected_context_str = get_selected_context_str(md.additionalContext)
-
-
- streamlit_status_str = get_streamlit_app_status_str(md.notebookID, md.notebookPath)
-
- context_str = f"""Remember to choose the correct tool to respond with.
-
- {rules_str}
-
-
- {JUPYTER_NOTEBOOK_SECTION_HEADING}
- {ai_optimized_cells_str}
-
- {VARIABLES_SECTION_HEADING}
- {variables_str}
-
- {FILES_SECTION_HEADING}
- {files_str}
-
- {STREAMLIT_APP_STATUS_SECTION_HEADING}
- {streamlit_status_str}
-
- {ACTIVE_CELL_ID_SECTION_HEADING}
- {md.activeCellId}
-
- {selected_context_str}
-
- {cell_update_output_str(md.base64EncodedActiveCellOutput is not None)}"""
-
- task_str = '' if md.input == '' else f"""Your task:
- {md.input}"""
-
- return '\n\n'.join([context_str, task_str]).strip()
+ sections: List[PromptSection] = [
+ SG.Generic("Reminder", "Remember to choose the correct tool to respond with."),
+ SG.Rules(md.additionalContext),
+ SG.StreamlitAppStatus(md.notebookID, md.notebookPath),
+ SG.Files(md.files),
+ SG.Variables(md.variables),
+ SG.SelectedContext(md.additionalContext),
+ SG.ActiveCellId(md.activeCellId),
+ SG.Notebook(md.aiOptimizedCells),
+ SG.GetCellOutputToolResponse(md.base64EncodedActiveCellOutput),
+ SG.Task(f"{md.input}"),
+ ]
+
+ prompt = Prompt(sections)
+ return str(prompt)
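The prompt_section_registry package itself is new in this release, but its implementation is not shown in this diff; only the call sites above and in the next file are. The following is a hedged sketch of the pattern those call sites imply (a PromptSection that renders a titled block, a Prompt that joins non-empty sections, and an SG namespace of section constructors). Everything beyond the names SG, Prompt, and PromptSection is a guess for illustration and may differ from the real modules:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class PromptSection:
    title: str
    body: Optional[str] = None

    def render(self) -> str:
        # Empty sections (no files, no variables, ...) drop out of the prompt.
        return f"{self.title}:\n{self.body}" if self.body else ""

class Prompt:
    def __init__(self, sections: List[PromptSection]) -> None:
        self.sections = sections

    def __str__(self) -> str:
        return "\n\n".join(rendered for rendered in (s.render() for s in self.sections) if rendered)

class SG:
    """Namespace of section constructors mirroring the call sites above."""

    @staticmethod
    def Generic(title: str, body: str) -> PromptSection:
        return PromptSection(title, body)

    @staticmethod
    def Files(files: Optional[List[str]]) -> PromptSection:
        return PromptSection("Files", "\n".join(files or []))

    @staticmethod
    def Variables(variables: Optional[List[str]]) -> PromptSection:
        return PromptSection("Variables", "\n".join(variables or []))

print(Prompt([SG.Generic("Reminder", "Choose the correct tool."), SG.Files(["sales.csv"])]))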
mito_ai/completions/prompt_builders/agent_smart_debug_prompt.py CHANGED
@@ -3,11 +3,8 @@

  from typing import List
  from mito_ai.completions.models import AgentSmartDebugMetadata
- from mito_ai.completions.prompt_builders.prompt_constants import (
- FILES_SECTION_HEADING,
- JUPYTER_NOTEBOOK_SECTION_HEADING,
- VARIABLES_SECTION_HEADING
- )
+ from mito_ai.completions.prompt_builders.prompt_section_registry import SG, Prompt
+ from mito_ai.completions.prompt_builders.prompt_section_registry.base import PromptSection

  # TODO:
  # 1. In the future, it might make sense to pass the previous CELL_UPDATE to this prompt?
@@ -18,22 +15,21 @@ from mito_ai.completions.prompt_builders.prompt_constants import (
  # graph of cells that we calculate ourselves, not relying on the AI.

  def create_agent_smart_debug_prompt(md: AgentSmartDebugMetadata) -> str:
- variables_str = '\n'.join([f"{variable}" for variable in md.variables or []])
- files_str = '\n'.join([f"{file}" for file in md.files or []])
- ai_optimized_cells_str = '\n'.join([f"{cell}" for cell in md.aiOptimizedCells or []])
+ sections: List[PromptSection] = []

- return f"""I just applied and executed the CELL_UPDATE that you just shared with me, but it errored. Below I am sharing with you a strategy for how I want you to resolve this error and information about the actual error that occured.
+ # Add intro text
+ sections.append(SG.Generic("Instructions", f"""I just applied and executed the CELL_UPDATE that you just shared with me, but it errored. Below I am sharing with you a strategy for how I want you to resolve this error and information about the actual error that occured.

  Use this strategy for this message only. After this message, continue using the original set of instructions that I provided you.

- It is very important that When fixing this error, you do not change the original intent of the code cell.
+ It is very important that when fixing this error, you do not change the original intent of the code cell.

  To fix this error, take the following approach:
  Step 1: ERROR ANALYSIS: Analyze the error message to identify why the code cell errored.
  Step 2: INTENT PRESERVATION: Make sure you understand the intent of the CELL_UPDATE so that you can be sure to preserve it when you create a new CELL_UPDATE
- Step 3: ERROR CORRECTION: Respond with a new CELL_UPDATE that is applied to the same cell as the erroring CELL_UPDATE.
+ Step 3: ERROR CORRECTION: Respond with a new CELL_UPDATE that is applied to the same cell as the erroring CELL_UPDATE or use the ASK_USER_QUESTION tool to get more information about how to proceed.

- <Instructions for each Phase />
+ INSTRUCTIONS FOR EACH PHASE

  ERROR ANALYSIS:

@@ -47,13 +43,15 @@ INTENT PRESERVATION:

  ERROR CORRECTION:

- - Return the full, updated version of cell {md.error_message_producing_code_cell_id} with the error fixed and a short explanation of the error.
+ - Use one of your tools to correct the error or get more information from the user on how to proceed.
+ - If you use the CELL_UPDATE tool, you must reutn the full updated version of cell {md.error_message_producing_code_cell_id} with the error fixed and a short explanation of the error.
  - You can only update code in {md.error_message_producing_code_cell_id}. You are unable to edit the code in any other cell when resolving this error.
  - Propose a solution that fixes the error and does not change the user's intent.
  - Make the solution as simple as possible.
  - Reuse as much of the existing code as possible.
  - DO NOT ADD TEMPORARY COMMENTS like '# Fixed the typo here' or '# Added this line to fix the error'
  - If you encounter a ModuleNotFoundError, you can install the package by adding the the following line to the top of the code cell: `!pip install <package_name> --quiet`.
+ - If the error is not resolvable without getting more information from the user, you can respond with a ASK_USER_QUESTION tool call.
  - If you encounter a NameError, you can use the RUN_ALL_CELLS tool to run all cells from the top of the notebook to the bottom to bring the variable into scope.
  RUN_ALL_CELLS:
  When you want to execute all cells in the notebook from top to bottom, respond with this format:
@@ -61,100 +59,89 @@ ERROR CORRECTION:
  type: 'run_all_cells',
  message: str
  }}
- Note that if the name error persists even after using run_all_cells, it means that the variable is not defined in the notebook and you should not reuse this tool. Additionally, this tool could also be used to refresh the notebook state.
-
- <Example>
-
- <Input>
-
- {FILES_SECTION_HEADING}
- file_name: sales.csv
-
- Jupyter Notebook:
- [
- {{
- cell_type: 'markdown'
- id: '9e38c62b-38f8-457d-bb8d-28bfc52edf2c'
- code: \"\"\"# Transaction Analysis \"\"\"
- }},
- {{
- cell_type: 'code'
- id: 'adslkaf-jf73-l8xn-92j7-kjd8kdcnd2kso'
- code: \"\"\" 'df' = pd.DataFrame({{
- 'order_id': [1, 2, 3, 4],
- 'date': ['Mar 7, 2025', 'Sep 24, 2024', '25 June, 2024', 'June 29, 2024'],
- 'amount': [100, 150, 299, 99]
- }})
- }},
+ Note that if the name error persists even after using run_all_cells, it means that the variable is not defined in the notebook and you should not reuse this tool. Additionally, this tool could also be used to refresh the notebook state."""))
+
+ # Add example
+ example_content = f"""
+ <Input>
+
+ Files:
+ "file_name: sales.csv"
+
+ Jupyter Notebook:
+ [
+ {{
+ cell_type: 'markdown'
+ id: '9e38c62b-38f8-457d-bb8d-28bfc52edf2c'
+ code: \"\"\"# Transaction Analysis \"\"\"
+ }},
+ {{
+ cell_type: 'code'
+ id: 'adslkaf-jf73-l8xn-92j7-kjd8kdcnd2kso'
+ code: \"\"\" 'df' = pd.DataFrame({{
+ 'order_id': [1, 2, 3, 4],
+ 'date': ['Mar 7, 2025', 'Sep 24, 2024', '25 June, 2024', 'June 29, 2024'],
+ 'amount': [100, 150, 299, 99]
+ }})
+ }},
+ {{
+ cell_type: 'code'
+ id: 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'
+ code: \"\"\"df['date'] = pd.to_datetime(df['date'])\"\"\"
+ }},
+ ]
+
+ Variables:
  {{
- cell_type: 'code'
- id: 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'
- code: \"\"\"df['date'] = pd.to_datetime(df['date'])\"\"\"
- }},
- ]
-
- {VARIABLES_SECTION_HEADING}
- {{
- 'df': pd.DataFrame({{
- 'order_id': [1, 2, 3, 4],
- 'date': ['Mar 7, 2025', 'Sep 24, 2024', '25 June, 2024', 'June 29, 2024'],
- 'amount': [100, 150, 299, 99]
- }})
- }}
-
- Cell ID of the Error Producing Code Cell:
- 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'
+ 'df': pd.DataFrame({{
+ 'order_id': [1, 2, 3, 4],
+ 'date': ['Mar 7, 2025', 'Sep 24, 2024', '25 June, 2024', 'June 29, 2024'],
+ 'amount': [100, 150, 299, 99]
+ }})
+ }}

- Error Traceback:
- Cell In[27], line 1
- ----> 1 df['date'] = pd.to_datetime(df['date'])
+ Cell ID of the Error Producing Code Cell:
+ 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'

- ValueError: time data "25 June, 2024" doesn't match format "%b %d, %Y", at position 2. You might want to try:
- - passing `format` if your strings have a consistent format;
- - passing `format='ISO8601'` if your strings are all ISO8601 but not necessarily in exactly the same format;
- - passing `format='mixed'`, and the format will be inferred for each element individually. You might want to use `dayfirst` alongside this.
+ Error Traceback:
+ Cell In[27], line 1
+ ----> 1 df['date'] = pd.to_datetime(df['date'])

+ ValueError: time data "25 June, 2024" doesn't match format "%b %d, %Y", at position 2. You might want to try:
+ - passing `format` if your strings have a consistent format;
+ - passing `format='ISO8601'` if your strings are all ISO8601 but not necessarily in exactly the same format;
+ - passing `format='mixed'`, and the format will be inferred for each element individually. You might want to use `dayfirst` alongside this.

- </ Input>

- < Your Thinking >
+ </ Input>

- ERROR ANALYSIS
- This is a ValueError caused by applying the wrong format to a specific date string. Because it was triggered at position 2, the first date string must have successfully converted. By looking at the defined variables, I can see that first date string is in the format "Mar 7, 2025", but the third date string is in the format "25 June, 2024". Those dates are not in the same format, so the conversion failed.
+ < Your Thinking >

- INTENT PRESERVATION:
- User is trying to convert the date column to a datetime object even though the dates are not in the same starting format.
+ ERROR ANALYSIS
+ This is a ValueError caused by applying the wrong format to a specific date string. Because it was triggered at position 2, the first date string must have successfully converted. By looking at the defined variables, I can see that first date string is in the format "Mar 7, 2025", but the third date string is in the format "25 June, 2024". Those dates are not in the same format, so the conversion failed.

- </ Your Thinking >
+ INTENT PRESERVATION:
+ User is trying to convert the date column to a datetime object even though the dates are not in the same starting format.

- <Output>
+ </ Your Thinking >

+ <Output>

- {{
- is_finished: false,
- cell_update: {{
- type: 'modification'
- id: 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'
- code: "def parse_date(date_str):\n formats = ['%b %d, %Y', '%d %B, %Y']\n\n for fmt in formats:\n try:\n return pd.to_datetime(date_str, format=fmt)\n except ValueError:\n # Try next format\n continue\n\n # If not format worked, return Not a Time\n return pd.NaT\n\ndf['date'] = df['date'].apply(lambda x: parse_date(x))"
+ {{
+ is_finished: false,
+ cell_update: {{
+ type: 'modification'
+ id: 'c68fdf19-db8c-46dd-926f-d90ad35bb3bc'
+ code: "def parse_date(date_str):\n formats = ['%b %d, %Y', '%d %B, %Y']\n\n for fmt in formats:\n try:\n return pd.to_datetime(date_str, format=fmt)\n except ValueError:\n # Try next format\n continue\n\n # If not format worked, return Not a Time\n return pd.NaT\n\ndf['date'] = df['date'].apply(lambda x: parse_date(x))"
+ }}
  }}
- }}
-
- </Output>
-
- </Example>
-
- {FILES_SECTION_HEADING}
- {files_str}
-
- {JUPYTER_NOTEBOOK_SECTION_HEADING}
- {ai_optimized_cells_str}
-
- {VARIABLES_SECTION_HEADING}
- {variables_str}

- Cell ID of the Error Producing Code Cell:
- {md.error_message_producing_code_cell_id}
+ </Output>"""
+ sections.append(SG.Example("Example", example_content))
+ sections.append(SG.Files(md.files))
+ sections.append(SG.Notebook(md.aiOptimizedCells))
+ sections.append(SG.Variables(md.variables))
+ sections.append(SG.ErrorTraceback(md.error_message_producing_code_cell_id, md.errorMessage))

- Error Traceback:
- {md.errorMessage}
- """
+ prompt = Prompt(sections)
+ return str(prompt)